author    Srikant Patnaik  2015-01-11 12:28:04 +0530
committer Srikant Patnaik  2015-01-11 12:28:04 +0530
commit    871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      8718f573808810c2a1e8cb8fb6ac469093ca2784 /drivers/misc
parent    9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize it to match the standard kernel source layout.
Diffstat (limited to 'drivers/misc')
-rw-r--r-- drivers/misc/Kconfig | 549
-rw-r--r-- drivers/misc/Makefile | 62
-rw-r--r-- drivers/misc/ab8500-pwm.c | 169
-rw-r--r-- drivers/misc/ad525x_dpot-i2c.c | 121
-rw-r--r-- drivers/misc/ad525x_dpot-spi.c | 143
-rw-r--r-- drivers/misc/ad525x_dpot.c | 771
-rw-r--r-- drivers/misc/ad525x_dpot.h | 215
-rw-r--r-- drivers/misc/akm8975.c | 732
-rw-r--r-- drivers/misc/altera-stapl/Kconfig | 8
-rw-r--r-- drivers/misc/altera-stapl/Makefile | 3
-rw-r--r-- drivers/misc/altera-stapl/altera-comp.c | 142
-rw-r--r-- drivers/misc/altera-stapl/altera-exprt.h | 33
-rw-r--r-- drivers/misc/altera-stapl/altera-jtag.c | 1021
-rw-r--r-- drivers/misc/altera-stapl/altera-jtag.h | 113
-rw-r--r-- drivers/misc/altera-stapl/altera-lpt.c | 70
-rw-r--r-- drivers/misc/altera-stapl/altera.c | 2537
-rw-r--r-- drivers/misc/apds9802als.c | 339
-rw-r--r-- drivers/misc/apds990x.c | 1286
-rw-r--r-- drivers/misc/arm-charlcd.c | 396
-rw-r--r-- drivers/misc/atmel-ssc.c | 177
-rw-r--r-- drivers/misc/atmel_pwm.c | 410
-rw-r--r-- drivers/misc/atmel_tclib.c | 207
-rw-r--r-- drivers/misc/bcm2079x-i2c.c | 736
-rw-r--r-- drivers/misc/bh1770glc.c | 1406
-rw-r--r-- drivers/misc/bh1780gli.c | 263
-rw-r--r-- drivers/misc/bmp085.c | 472
-rw-r--r-- drivers/misc/c2port/Kconfig | 35
-rw-r--r-- drivers/misc/c2port/Makefile | 3
-rw-r--r-- drivers/misc/c2port/c2port-duramar2150.c | 159
-rw-r--r-- drivers/misc/c2port/core.c | 1006
-rw-r--r-- drivers/misc/carma/Kconfig | 17
-rw-r--r-- drivers/misc/carma/Makefile | 2
-rw-r--r-- drivers/misc/carma/carma-fpga-program.c | 1140
-rw-r--r-- drivers/misc/carma/carma-fpga.c | 1447
-rw-r--r-- drivers/misc/cb710/Kconfig | 25
-rw-r--r-- drivers/misc/cb710/Makefile | 6
-rw-r--r-- drivers/misc/cb710/core.c | 359
-rw-r--r-- drivers/misc/cb710/debug.c | 118
-rw-r--r-- drivers/misc/cb710/sgbuf2.c | 146
-rw-r--r-- drivers/misc/cs5535-mfgpt.c | 351
-rw-r--r-- drivers/misc/ds1682.c | 257
-rw-r--r-- drivers/misc/eeprom/Kconfig | 98
-rw-r--r-- drivers/misc/eeprom/Makefile | 7
-rw-r--r-- drivers/misc/eeprom/at24.c | 707
-rw-r--r-- drivers/misc/eeprom/at25.c | 413
-rw-r--r-- drivers/misc/eeprom/digsy_mtc_eeprom.c | 85
-rw-r--r-- drivers/misc/eeprom/eeprom.c | 238
-rw-r--r-- drivers/misc/eeprom/eeprom_93cx6.c | 321
-rw-r--r-- drivers/misc/eeprom/eeprom_93xx46.c | 400
-rw-r--r-- drivers/misc/eeprom/max6875.c | 215
-rw-r--r-- drivers/misc/enclosure.c | 570
-rw-r--r-- drivers/misc/ep93xx_pwm.c | 385
-rw-r--r-- drivers/misc/fsa9480.c | 546
-rw-r--r-- drivers/misc/hmc6352.c | 155
-rw-r--r-- drivers/misc/hpilo.c | 889
-rw-r--r-- drivers/misc/hpilo.h | 212
-rw-r--r-- drivers/misc/ibmasm/Makefile | 15
-rw-r--r-- drivers/misc/ibmasm/command.c | 187
-rw-r--r-- drivers/misc/ibmasm/dot_command.c | 152
-rw-r--r-- drivers/misc/ibmasm/dot_command.h | 78
-rw-r--r-- drivers/misc/ibmasm/event.c | 177
-rw-r--r-- drivers/misc/ibmasm/heartbeat.c | 101
-rw-r--r-- drivers/misc/ibmasm/i2o.h | 77
-rw-r--r-- drivers/misc/ibmasm/ibmasm.h | 220
-rw-r--r-- drivers/misc/ibmasm/ibmasmfs.c | 630
-rw-r--r-- drivers/misc/ibmasm/lowlevel.c | 85
-rw-r--r-- drivers/misc/ibmasm/lowlevel.h | 137
-rw-r--r-- drivers/misc/ibmasm/module.c | 237
-rw-r--r-- drivers/misc/ibmasm/r_heartbeat.c | 99
-rw-r--r-- drivers/misc/ibmasm/remote.c | 282
-rw-r--r-- drivers/misc/ibmasm/remote.h | 270
-rw-r--r-- drivers/misc/ibmasm/uart.c | 72
-rw-r--r-- drivers/misc/ics932s401.c | 495
-rw-r--r-- drivers/misc/ioc4.c | 502
-rw-r--r-- drivers/misc/isl29003.c | 463
-rw-r--r-- drivers/misc/isl29020.c | 237
-rw-r--r-- drivers/misc/iwmc3200top/Kconfig | 20
-rw-r--r-- drivers/misc/iwmc3200top/Makefile | 29
-rw-r--r-- drivers/misc/iwmc3200top/debugfs.c | 137
-rw-r--r-- drivers/misc/iwmc3200top/debugfs.h | 58
-rw-r--r-- drivers/misc/iwmc3200top/fw-download.c | 358
-rw-r--r-- drivers/misc/iwmc3200top/fw-msg.h | 113
-rw-r--r-- drivers/misc/iwmc3200top/iwmc3200top.h | 205
-rw-r--r-- drivers/misc/iwmc3200top/log.c | 348
-rw-r--r-- drivers/misc/iwmc3200top/log.h | 171
-rw-r--r-- drivers/misc/iwmc3200top/main.c | 662
-rw-r--r-- drivers/misc/kgdbts.c | 1187
-rw-r--r-- drivers/misc/lis3lv02d/Kconfig | 37
-rw-r--r-- drivers/misc/lis3lv02d/Makefile | 7
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d.c | 1054
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d.h | 294
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d_i2c.c | 263
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d_spi.c | 134
-rw-r--r-- drivers/misc/lkdtm.c | 695
-rw-r--r-- drivers/misc/max8997-muic.c | 495
-rwxr-xr-x drivers/misc/mediatek/Kconfig | 9
-rwxr-xr-x drivers/misc/mediatek/Makefile | 3
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/Kconfig | 35
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/Makefile | 39
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/bt/Makefile | 41
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/bt/uhid/Makefile | 46
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/fm/Makefile | 72
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/fm/private/Makefile | 44
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/gps/Makefile | 50
-rwxr-xr-x drivers/misc/mediatek/combo_mt66xx/wmt/Makefile | 78
-rwxr-xr-x drivers/misc/mediatek/dummy.c | 0
-rw-r--r-- drivers/misc/pch_phub.c | 914
-rw-r--r-- drivers/misc/phantom.c | 571
-rwxr-xr-x drivers/misc/pn547.c | 1180
-rw-r--r-- drivers/misc/pti.c | 996
-rwxr-xr-x drivers/misc/rsa/Kconfig | 6
-rwxr-xr-x drivers/misc/rsa/Makefile | 8
-rwxr-xr-x drivers/misc/rsa/asn1.h | 244
-rwxr-xr-x drivers/misc/rsa/base64.h | 87
-rwxr-xr-x drivers/misc/rsa/bignum.c | 2132
-rwxr-xr-x drivers/misc/rsa/bignum.h | 763
-rwxr-xr-x drivers/misc/rsa/dhm.h | 153
-rwxr-xr-x drivers/misc/rsa/pem.h | 100
-rwxr-xr-x drivers/misc/rsa/rsa.h | 373
-rwxr-xr-x drivers/misc/rsa/rsa_verify.c | 572
-rwxr-xr-x drivers/misc/rsa/x509.h | 726
-rw-r--r-- drivers/misc/sgi-gru/Makefile | 5
-rw-r--r-- drivers/misc/sgi-gru/gru.h | 78
-rw-r--r-- drivers/misc/sgi-gru/gru_instructions.h | 736
-rw-r--r-- drivers/misc/sgi-gru/grufault.c | 902
-rw-r--r-- drivers/misc/sgi-gru/grufile.c | 618
-rw-r--r-- drivers/misc/sgi-gru/gruhandles.c | 216
-rw-r--r-- drivers/misc/sgi-gru/gruhandles.h | 531
-rw-r--r-- drivers/misc/sgi-gru/grukdump.c | 235
-rw-r--r-- drivers/misc/sgi-gru/grukservices.c | 1162
-rw-r--r-- drivers/misc/sgi-gru/grukservices.h | 214
-rw-r--r-- drivers/misc/sgi-gru/grulib.h | 153
-rw-r--r-- drivers/misc/sgi-gru/grumain.c | 973
-rw-r--r-- drivers/misc/sgi-gru/gruprocfs.c | 381
-rw-r--r-- drivers/misc/sgi-gru/grutables.h | 678
-rw-r--r-- drivers/misc/sgi-gru/grutlbpurge.c | 378
-rw-r--r-- drivers/misc/sgi-xp/Makefile | 19
-rw-r--r-- drivers/misc/sgi-xp/xp.h | 358
-rw-r--r-- drivers/misc/sgi-xp/xp_main.c | 286
-rw-r--r-- drivers/misc/sgi-xp/xp_nofault.S | 35
-rw-r--r-- drivers/misc/sgi-xp/xp_sn2.c | 190
-rw-r--r-- drivers/misc/sgi-xp/xp_uv.c | 171
-rw-r--r-- drivers/misc/sgi-xp/xpc.h | 1004
-rw-r--r-- drivers/misc/sgi-xp/xpc_channel.c | 1011
-rw-r--r-- drivers/misc/sgi-xp/xpc_main.c | 1344
-rw-r--r-- drivers/misc/sgi-xp/xpc_partition.c | 541
-rw-r--r-- drivers/misc/sgi-xp/xpc_sn2.c | 2462
-rw-r--r-- drivers/misc/sgi-xp/xpc_uv.c | 1767
-rw-r--r-- drivers/misc/sgi-xp/xpnet.c | 607
-rw-r--r-- drivers/misc/spear13xx_pcie_gadget.c | 898
-rw-r--r-- drivers/misc/ti-st/Kconfig | 17
-rw-r--r-- drivers/misc/ti-st/Makefile | 6
-rw-r--r-- drivers/misc/ti-st/st_core.c | 890
-rw-r--r-- drivers/misc/ti-st/st_kim.c | 844
-rw-r--r-- drivers/misc/ti-st/st_ll.c | 169
-rw-r--r-- drivers/misc/ti_dac7512.c | 90
-rw-r--r-- drivers/misc/tifm_7xx1.c | 454
-rw-r--r-- drivers/misc/tifm_core.c | 368
-rw-r--r-- drivers/misc/tsl2550.c | 462
-rw-r--r-- drivers/misc/uid_stat.c | 156
-rwxr-xr-x drivers/misc/viatel/Kconfig | 14
-rwxr-xr-x drivers/misc/viatel/Makefile | 3
-rwxr-xr-x drivers/misc/viatel/core.c | 49
-rwxr-xr-x drivers/misc/viatel/core.h | 21
-rwxr-xr-x drivers/misc/viatel/oem.c | 752
-rwxr-xr-x drivers/misc/viatel/power.c | 340
-rwxr-xr-x drivers/misc/viatel/sync.c | 2445
-rw-r--r-- drivers/misc/vmw_balloon.c | 838
-rw-r--r-- drivers/misc/wl127x-rfkill.c | 121
169 files changed, 68710 insertions, 0 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
new file mode 100644
index 00000000..cdd400f4
--- /dev/null
+++ b/drivers/misc/Kconfig
@@ -0,0 +1,549 @@
+#
+# Misc strange devices
+#
+
+menu "Misc devices"
+
+config SENSORS_LIS3LV02D
+ tristate
+ depends on INPUT
+ select INPUT_POLLDEV
+ default n
+
+config AD525X_DPOT
+ tristate "Analog Devices Digital Potentiometers"
+ depends on (I2C || SPI) && SYSFS
+ help
+ If you say yes here, you get support for the Analog Devices
+ AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
+ AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
+ AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
+ AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
+ AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
+ AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
+ ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+ AD5271, AD5272, AD5274
+ digital potentiometer chips.
+
+ See Documentation/misc-devices/ad525x_dpot.txt for the
+ userspace interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called ad525x_dpot.
+
+config AD525X_DPOT_I2C
+ tristate "support I2C bus connection"
+ depends on AD525X_DPOT && I2C
+ help
+ Say Y here if you have digital potentiometers hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad525x_dpot-i2c.
+
+config AD525X_DPOT_SPI
+ tristate "support SPI bus connection"
+ depends on AD525X_DPOT && SPI_MASTER
+ help
+ Say Y here if you have digital potentiometers hooked to an SPI bus.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad525x_dpot-spi.
+
+config ATMEL_PWM
+ tristate "Atmel AT32/AT91 PWM support"
+ depends on HAVE_CLK
+ help
+ This option enables device driver support for the PWM channels
+ on certain Atmel processors. Pulse Width Modulation is used for
+ purposes including software controlled power-efficient backlights
+ on LCD displays, motor control, and waveform generation.
+
+config BCM2079X_NFC
+ tristate "bcm2079x NFC driver"
+ default m
+ ---help---
+ Say yes if you want the bcm2079x Near Field Communication driver.
+ This is for the I2C-connected version. If unsure, say N here.
+
+config AB8500_PWM
+ bool "AB8500 PWM support"
+ depends on AB8500_CORE && ARCH_U8500
+ select HAVE_PWM
+ help
+ This driver exports functions to enable/disable/configure/free Pulse
+ Width Modulation in the Analog Baseband Chip AB8500.
+ It is used by the LED and backlight drivers to control the intensity.
+
+config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+ these blocks by different drivers despite processor differences.
+
+config ATMEL_TCB_CLKSRC
+ bool "TC Block Clocksource"
+ depends on ATMEL_TCLIB
+ default y
+ help
+ Select this to get a high precision clocksource based on a
+ TC block with a 5+ MHz base clock rate. Two timer channels
+ are combined to make a single 32-bit timer.
+
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+ may be used as a clock event device supporting oneshot mode
+ (delays of up to two seconds) based on the 32 kHz clock.
+
+config ATMEL_TCB_CLKSRC_BLOCK
+ int
+ depends on ATMEL_TCB_CLKSRC
+ prompt "TC Block" if ARCH_AT91RM9200 || ARCH_AT91SAM9260 || CPU_AT32AP700X
+ default 0
+ range 0 1
+ help
+ Some chips provide more than one TC block, so you have the
+ choice of which one to use for the clock framework. The other
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
+
+config IBM_ASM
+ tristate "Device driver for IBM RSA service processor"
+ depends on X86 && PCI && INPUT && EXPERIMENTAL
+ ---help---
+ This option enables device driver support for in-band access to the
+ IBM RSA (Condor) service processor in eServer xSeries systems.
+ The ibmasm device driver allows user space applications to access
+ ASM (Advanced Systems Management) functions on the service
+ processor. The driver is meant to be used in conjunction with
+ a user space API.
+ The ibmasm driver also enables the OS to use the UART on the
+ service processor board as a regular serial port. To make use of
+ this feature, serial driver support (CONFIG_SERIAL_8250) must be
+ enabled.
+
+ WARNING: This software may not be supported or function
+ correctly on your IBM server. Please consult the IBM ServerProven
+ website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
+ for information on the specific driver level and support statement
+ for your IBM server.
+
+config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+ help
+ Say Y here if you want to build a driver for Sensable PHANToM device.
+
+ This driver is only for PCI PHANToMs.
+
+ If you choose to build it as a module, its name will be phantom. If unsure,
+ say N here.
+
+config INTEL_MID_PTI
+ tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
+ depends on PCI
+ default n
+ help
+ The PTI (Parallel Trace Interface) driver directs
+ trace data routed from various parts in the system out
+ through an Intel Penwell PTI port and out of the mobile
+ device for analysis with a debugging tool (Lauterbach or Fido).
+
+ You should select this driver if the target kernel is meant for
+ an Intel Atom (non-netbook) mobile device containing a MIPI
+ P1149.7 standard implementation.
+
+config SGI_IOC4
+ tristate "SGI IOC4 Base IO support"
+ depends on PCI
+ ---help---
+ This option enables basic support for the IOC4 chip on certain
+ SGI IO controller cards (IO9, IO10, and PCI-RT). This option
+ does not enable any specific functions on such a card, but provides
+ necessary infrastructure for other drivers to utilize.
+
+ If you have an SGI Altix with an IOC4-based card say Y.
+ Otherwise say N.
+
+config TIFM_CORE
+ tristate "TI Flash Media interface support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PCI
+ help
+ If you want support for Texas Instruments(R) Flash Media adapters
+ you should select this option and then also choose an appropriate
+ host adapter, such as 'TI Flash Media PCI74xx/PCI76xx host adapter
+ support', if you have a TI PCI74xx compatible card reader, for
+ example.
+ You will also have to select some flash card format drivers. MMC/SD
+ cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD
+ Interface support (MMC_TIFM_SD)'.
+
+ To compile this driver as a module, choose M here: the module will
+ be called tifm_core.
+
+config TIFM_7XX1
+ tristate "TI Flash Media PCI74xx/PCI76xx host adapter support (EXPERIMENTAL)"
+ depends on PCI && TIFM_CORE && EXPERIMENTAL
+ default TIFM_CORE
+ help
+ This option enables support for Texas Instruments(R) PCI74xx and
+ PCI76xx families of Flash Media adapters, found in many laptops.
+ To make actual use of the device, you will have to select some
+ flash card format drivers, as outlined in the TIFM_CORE Help.
+
+ To compile this driver as a module, choose M here: the module will
+ be called tifm_7xx1.
+
+config ICS932S401
+ tristate "Integrated Circuits ICS932S401"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Integrated Circuits
+ ICS932S401 clock control chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called ics932s401.
+
+config ATMEL_SSC
+ tristate "Device driver for Atmel SSC peripheral"
+ depends on AVR32 || ARCH_AT91
+ ---help---
+ This option enables device driver support for Atmel Synchronized
+ Serial Communication peripheral (SSC).
+
+ The SSC peripheral supports a wide variety of serial frame based
+ communications, i.e. I2S, SPI, etc.
+
+ If unsure, say N.
+
+config ENCLOSURE_SERVICES
+ tristate "Enclosure Services"
+ default n
+ help
+ Provides support for intelligent enclosures (bays which
+ contain storage devices). You also need either a host
+ driver (SCSI/ATA) which supports enclosures
+ or a SCSI enclosure device (SES) to use these services.
+
+config SGI_XP
+ tristate "Support communication between SGI SSIs"
+ depends on NET
+ depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
+ select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+ select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+ select SGI_GRU if X86_64 && SMP
+ ---help---
+ An SGI machine can be divided into multiple Single System
+ Images which act independently of each other and have
+ hardware based memory protection from the others. Enabling
+ this feature will allow for direct communication between SSIs
+ based on a network adapter and DMA messaging.
+
+config CS5535_MFGPT
+ tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
+ depends on PCI && X86 && MFD_CS5535
+ default n
+ help
+ This driver provides access to MFGPT functionality for other
+ drivers that need timers. MFGPTs are available in the CS5535 and
+ CS5536 companion chips that are found in AMD Geode and several
+ other platforms. They have a better resolution and max interval
+ than the generic PIT, and are suitable for use as high-res timers.
+ You probably don't want to enable this manually; other drivers that
+ make use of it should enable it.
+
+config CS5535_MFGPT_DEFAULT_IRQ
+ int
+ depends on CS5535_MFGPT
+ default 7
+ help
+ MFGPTs on the CS5535 require an interrupt. The selected IRQ
+ can be overridden as a module option as well as by drivers that
+ use the cs5535_mfgpt_ API; however, different architectures might
+ want to use a different IRQ by default. This is here for
+ architectures to set as necessary.
+
+config CS5535_CLOCK_EVENT_SRC
+ tristate "CS5535/CS5536 high-res timer (MFGPT) events"
+ depends on GENERIC_CLOCKEVENTS && CS5535_MFGPT
+ help
+ This driver provides a clock event source based on the MFGPT
+ timer(s) in the CS5535 and CS5536 companion chips.
+ MFGPTs have a better resolution and max interval than the
+ generic PIT, and are suitable for use as high-res timers.
+
+config HP_ILO
+ tristate "Channel interface driver for the HP iLO processor"
+ depends on PCI
+ default n
+ help
+ The channel interface driver allows applications to communicate
+ with iLO management processors present on HP ProLiant servers.
+ Upon loading, the driver creates /dev/hpilo/dXccbN files, which
+ can be used to gather data from the management processor, via
+ read and write system calls.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hpilo.
+
+config SGI_GRU
+ tristate "SGI GRU driver"
+ depends on X86_UV && SMP
+ default n
+ select MMU_NOTIFIER
+ ---help---
+ The GRU is a hardware resource located in the system chipset. The GRU
+ contains memory that can be mmapped into the user address space. This memory is
+ used to communicate with the GRU to perform functions such as load/store,
+ scatter/gather, bcopy, AMOs, etc. The GRU is directly accessed by user
+ instructions using user virtual addresses. GRU instructions (ex., bcopy) use
+ user virtual addresses for operands.
+
+ If you are not running on an SGI UV system, say N.
+
+config SGI_GRU_DEBUG
+ bool "SGI GRU driver debug"
+ depends on SGI_GRU
+ default n
+ ---help---
+ This option enables additional debugging code for the SGI GRU driver. If
+ you are unsure, say N.
+
+config APDS9802ALS
+ tristate "Medfield Avago APDS9802 ALS Sensor module"
+ depends on I2C
+ help
+ If you say yes here you get support for the ALS APDS9802 ambient
+ light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called apds9802als.
+
+config ISL29003
+ tristate "Intersil ISL29003 ambient light sensor"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get support for the Intersil ISL29003
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called isl29003.
+
+config ISL29020
+ tristate "Intersil ISL29020 ambient light sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the Intersil ISL29020
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called isl29020.
+
+config SENSORS_TSL2550
+ tristate "Taos TSL2550 ambient light sensor"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get support for the Taos TSL2550
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called tsl2550.
+
+config SENSORS_BH1780
+ tristate "ROHM BH1780GLI ambient light sensor"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get support for the ROHM BH1780GLI
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called bh1780gli.
+
+config SENSORS_BH1770
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+ tristate "APDS990X combined als and proximity sensors"
+ depends on I2C
+ default n
+ ---help---
+ Say Y here if you want to build a driver for Avago APDS990x
+ combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds990x. If unsure, say N here.
+
+config HMC6352
+ tristate "Honeywell HMC6352 compass"
+ depends on I2C
+ help
+ This driver provides support for the Honeywell HMC6352 compass,
+ providing configuration and heading data via sysfs.
+
+config SENSORS_AK8975
+ tristate "AK8975 compass support"
+ default n
+ depends on I2C
+ help
+ If you say yes here you get support for Asahi Kasei's
+ orientation sensor AK8975.
+
+config EP93XX_PWM
+ tristate "EP93xx PWM support"
+ depends on ARCH_EP93XX
+ help
+ This option enables device driver support for the PWM channels
+ on the Cirrus EP93xx processors. The EP9307 chip only has one
+ PWM channel; all the others have two. The second channel is an
+ alternate function of the EGPIO14 pin. A sysfs interface is
+ provided to control the PWM channels.
+
+ To compile this driver as a module, choose M here: the module will
+ be called ep93xx_pwm.
+
+config DS1682
+ tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Dallas Semiconductor
+ DS1682 Total Elapsed Time Recorder.
+
+ This driver can also be built as a module. If so, the module
+ will be called ds1682.
+
+config SPEAR13XX_PCIE_GADGET
+ bool "PCIe gadget support for SPEAr13XX platform"
+ depends on ARCH_SPEAR13XX
+ default n
+ help
+ This option enables gadget support for the PCIe controller. If
+ the board file defines any controller as a PCIe endpoint, a sysfs
+ entry will be created for that controller. These sysfs nodes can
+ be used to configure the PCIe endpoint as required.
+
+config TI_DAC7512
+ tristate "Texas Instruments DAC7512"
+ depends on SPI && SYSFS
+ help
+ If you say yes here you get support for the Texas Instruments
+ DAC7512 16-bit digital-to-analog converter.
+
+ This driver can also be built as a module. If so, the module
+ will be called ti_dac7512.
+
+config UID_STAT
+ bool "UID based statistics tracking exported to /proc/uid_stat"
+ default n
+
+config VMWARE_BALLOON
+ tristate "VMware Balloon Driver"
+ depends on X86
+ help
+ This is the VMware physical memory management driver, which acts
+ like a "balloon" that can be inflated to reclaim physical pages
+ by reserving them in the guest and invalidating them in the
+ monitor, freeing up the underlying machine pages so they can
+ be allocated to other guests. The balloon can also be deflated
+ to allow the guest to use more physical memory.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmw_balloon.
+
+config ARM_CHARLCD
+ bool "ARM Ltd. Character LCD Driver"
+ depends on PLAT_VERSATILE
+ help
+ This is a driver for the character LCD found on the ARM Ltd.
+ Versatile and RealView Platform Baseboards. It doesn't do
+ very much more than display the text "ARM Linux" on the first
+ line and the Linux version on the second line, but that's
+ still useful.
+
+config BMP085
+ tristate "BMP085 digital pressure sensor"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get support for the Bosch Sensortec
+ BMP085 digital pressure sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bmp085.
+
+config PCH_PHUB
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
+ depends on PCI
+ help
+ This driver is for the PHUB (Packet Hub) of the Intel Topcliff PCH
+ (Platform Controller Hub), an IOH (Input/Output Hub) for x86
+ embedded processors. The Topcliff stores the MAC address and Option
+ ROM data in SROM, and this driver provides access to both.
+
+ This driver can also be used for the LAPIS Semiconductor IOHs
+ ML7213 (for IVI/In-Vehicle Infotainment use), ML7223 (for MP/Media
+ Phone use), and ML7831 (for general purpose use). All three are
+ companion chips for the Intel Atom E6xx series and are fully
+ compatible with the Intel EG20T PCH.
+
+ To compile this driver as a module, choose M here: the module will
+ be called pch_phub.
+
+config USB_SWITCH_FSA9480
+ tristate "FSA9480 USB Switch"
+ depends on I2C
+ help
+ The FSA9480 is a USB port accessory detector and switch.
+ The FSA9480 is fully controlled using I2C and enables USB data,
+ stereo and mono audio, video, microphone and UART data to use
+ a common connector port.
+
+config MAX8997_MUIC
+ tristate "MAX8997 MUIC Support"
+ depends on MFD_MAX8997
+ help
+ If you say yes here you get support for the MUIC device of
+ Maxim MAX8997 PMIC.
+ The MAX8997 MUIC is a USB port accessory detector and switch.
+
+config WL127X_RFKILL
+ tristate "Bluetooth power control driver for TI wl127x"
+ depends on RFKILL
+ default n
+ ---help---
+ Creates an rfkill entry in sysfs for power control of Bluetooth
+ TI wl127x chips.
+
+config PN547_NFC
+ tristate "PN547 NFC driver"
+ depends on I2C
+ select CRC_CCITT
+ default n
+ ---help---
+ Say yes if you want the PN547 Near Field Communication driver.
+ This is for the I2C-connected version. If unsure, say N here.
+
+ To compile this driver as a module, choose M here. The module will
+ be called pn547.
+
+source "drivers/misc/c2port/Kconfig"
+source "drivers/misc/eeprom/Kconfig"
+source "drivers/misc/cb710/Kconfig"
+source "drivers/misc/iwmc3200top/Kconfig"
+source "drivers/misc/ti-st/Kconfig"
+source "drivers/misc/lis3lv02d/Kconfig"
+source "drivers/misc/carma/Kconfig"
+source "drivers/misc/altera-stapl/Kconfig"
+endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
new file mode 100644
index 00000000..f2e3c9ef
--- /dev/null
+++ b/drivers/misc/Makefile
@@ -0,0 +1,62 @@
+#
+# Makefile for misc devices that really don't fit anywhere else.
+#
+
+obj-$(CONFIG_IBM_ASM) += ibmasm/
+obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
+obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
+obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
+obj-$(CONFIG_INTEL_MID_PTI) += pti.o
+obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
+obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
+obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
+obj-$(CONFIG_BMP085) += bmp085.o
+obj-$(CONFIG_ICS932S401) += ics932s401.o
+obj-$(CONFIG_LKDTM) += lkdtm.o
+obj-$(CONFIG_TIFM_CORE) += tifm_core.o
+obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
+obj-$(CONFIG_PHANTOM) += phantom.o
+obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
+obj-$(CONFIG_SGI_IOC4) += ioc4.o
+obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+obj-$(CONFIG_SGI_XP) += sgi-xp/
+obj-$(CONFIG_SGI_GRU) += sgi-gru/
+obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
+obj-$(CONFIG_HP_ILO) += hpilo.o
+obj-$(CONFIG_APDS9802ALS) += apds9802als.o
+obj-$(CONFIG_ISL29003) += isl29003.o
+obj-$(CONFIG_ISL29020) += isl29020.o
+obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
+obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+obj-$(CONFIG_DS1682) += ds1682.o
+obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
+obj-$(CONFIG_UID_STAT) += uid_stat.o
+obj-$(CONFIG_C2PORT) += c2port/
+obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
+obj-$(CONFIG_HMC6352) += hmc6352.o
+obj-y += eeprom/
+obj-y += cb710/
+obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
+obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
+obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
+obj-$(CONFIG_PCH_PHUB) += pch_phub.o
+obj-y += ti-st/
+obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
+obj-y += lis3lv02d/
+obj-y += carma/
+obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
+obj-$(CONFIG_ALTERA_STAPL) += altera-stapl/
+obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
+obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
+obj-y += rsa/
+obj-y += viatel/
+obj-y += mediatek/
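+# Composite modules below: "<name>-objs" lists the object files that are
+# linked together into <name>.o, and the matching obj-$(CONFIG_...) line
+# pulls the composite in when the corresponding option is enabled.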
+wmt_pn547-objs += pn547.o
+obj-$(CONFIG_PN547_NFC) += wmt_pn547.o
+
+s_wmt_nfc_bcm2079x-objs += bcm2079x-i2c.o
+obj-$(CONFIG_BCM2079X_NFC) += s_wmt_nfc_bcm2079x.o
\ No newline at end of file
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
new file mode 100644
index 00000000..d7a9aa14
--- /dev/null
+++ b/drivers/misc/ab8500-pwm.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pwm.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/module.h>
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB8500_PWM_OUT_CTRL1_REG 0x60
+#define AB8500_PWM_OUT_CTRL2_REG 0x61
+#define AB8500_PWM_OUT_CTRL7_REG 0x66
+
+/* backlight driver constants */
+#define ENABLE_PWM 1
+#define DISABLE_PWM 0
+
+struct pwm_device {
+ struct device *dev;
+ struct list_head node;
+ const char *label;
+ unsigned int pwm_id;
+};
+
+static LIST_HEAD(pwm_list);
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ int ret = 0;
+ unsigned int higher_val, lower_val;
+ u8 reg;
+
+ /*
+ * get the first 8 bits that are to be written to
+ * AB8500_PWM_OUT_CTRL1_REG[0:7]
+ */
+ lower_val = duty_ns & 0x00FF;
+ /*
+ * get bits [8:9] that are to be written to
+ * AB8500_PWM_OUT_CTRL2_REG[0:1]
+ */
+ higher_val = ((duty_ns & 0x0300) >> 8);
+
+ reg = AB8500_PWM_OUT_CTRL1_REG + ((pwm->pwm_id - 1) * 2);
+
+ ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
+ reg, (u8)lower_val);
+ if (ret < 0)
+ return ret;
+ ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
+ (reg + 1), (u8)higher_val);
+
+ return ret;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), ENABLE_PWM);
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to enable PWM, Error %d\n",
+ pwm->label, ret);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), DISABLE_PWM);
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
+ pwm->label, ret);
+ return;
+}
+EXPORT_SYMBOL(pwm_disable);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == pwm_id) {
+ pwm->label = label;
+ pwm->pwm_id = pwm_id;
+ return pwm;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ pwm_disable(pwm);
+}
+EXPORT_SYMBOL(pwm_free);
+
+static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ /*
+ * Nothing else to do in probe; we only need the struct device,
+ * which the abx500 register read/write helpers require
+ */
+ pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ if (pwm == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ pwm->dev = &pdev->dev;
+ pwm->pwm_id = pdev->id;
+ list_add_tail(&pwm->node, &pwm_list);
+ platform_set_drvdata(pdev, pwm);
+ dev_dbg(pwm->dev, "pwm probe successful\n");
+ return 0;
+}
+
+static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
+{
+ struct pwm_device *pwm = platform_get_drvdata(pdev);
+ list_del(&pwm->node);
+ dev_dbg(&pdev->dev, "pwm driver removed\n");
+ kfree(pwm);
+ return 0;
+}
+
+static struct platform_driver ab8500_pwm_driver = {
+ .driver = {
+ .name = "ab8500-pwm",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_pwm_probe,
+ .remove = __devexit_p(ab8500_pwm_remove),
+};
+
+static int __init ab8500_pwm_init(void)
+{
+ return platform_driver_register(&ab8500_pwm_driver);
+}
+
+static void __exit ab8500_pwm_exit(void)
+{
+ platform_driver_unregister(&ab8500_pwm_driver);
+}
+
+subsys_initcall(ab8500_pwm_init);
+module_exit(ab8500_pwm_exit);
+MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
+MODULE_ALIAS("platform:ab8500-pwm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 00000000..82082627
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,121 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (I2C bus)
+ *
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* I2C bus functions */
+static int write_d8(void *client, u8 val)
+{
+ return i2c_smbus_write_byte(client, val);
+}
+
+static int write_r8d8(void *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int write_r8d16(void *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int read_d8(void *client)
+{
+ return i2c_smbus_read_byte(client);
+}
+
+static int read_r8d8(void *client, u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int read_r8d16(void *client, u8 reg)
+{
+ return i2c_smbus_read_word_data(client, reg);
+}
+
+static const struct ad_dpot_bus_ops bops = {
+ .read_d8 = read_d8,
+ .read_r8d8 = read_r8d8,
+ .read_r8d16 = read_r8d16,
+ .write_d8 = write_d8,
+ .write_r8d8 = write_r8d8,
+ .write_r8d16 = write_r8d16,
+};
+
+static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad_dpot_bus_data bdata = {
+ .client = client,
+ .bops = &bops,
+ };
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+ return -EIO;
+ }
+
+ return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
+}
+
+static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
+{
+ return ad_dpot_remove(&client->dev);
+}
+
+static const struct i2c_device_id ad_dpot_id[] = {
+ {"ad5258", AD5258_ID},
+ {"ad5259", AD5259_ID},
+ {"ad5251", AD5251_ID},
+ {"ad5252", AD5252_ID},
+ {"ad5253", AD5253_ID},
+ {"ad5254", AD5254_ID},
+ {"ad5255", AD5255_ID},
+ {"ad5241", AD5241_ID},
+ {"ad5242", AD5242_ID},
+ {"ad5243", AD5243_ID},
+ {"ad5245", AD5245_ID},
+ {"ad5246", AD5246_ID},
+ {"ad5247", AD5247_ID},
+ {"ad5248", AD5248_ID},
+ {"ad5280", AD5280_ID},
+ {"ad5282", AD5282_ID},
+ {"adn2860", ADN2860_ID},
+ {"ad5273", AD5273_ID},
+ {"ad5161", AD5161_ID},
+ {"ad5171", AD5171_ID},
+ {"ad5170", AD5170_ID},
+ {"ad5172", AD5172_ID},
+ {"ad5173", AD5173_ID},
+ {"ad5272", AD5272_ID},
+ {"ad5274", AD5274_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
+
+static struct i2c_driver ad_dpot_i2c_driver = {
+ .driver = {
+ .name = "ad_dpot",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad_dpot_i2c_probe,
+ .remove = __devexit_p(ad_dpot_i2c_remove),
+ .id_table = ad_dpot_id,
+};
+
+module_i2c_driver(ad_dpot_i2c_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 00000000..f6231754
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,143 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (SPI bus)
+ *
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* SPI bus functions */
+static int write8(void *client, u8 val)
+{
+ u8 data = val;
+ return spi_write(client, &data, 1);
+}
+
+static int write16(void *client, u8 reg, u8 val)
+{
+ u8 data[2] = {reg, val};
+ return spi_write(client, data, 2);
+}
+
+static int write24(void *client, u8 reg, u16 val)
+{
+ u8 data[3] = {reg, val >> 8, val};
+ return spi_write(client, data, 3);
+}
+
+static int read8(void *client)
+{
+ int ret;
+ u8 data;
+ ret = spi_read(client, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ return data;
+}
+
+static int read16(void *client, u8 reg)
+{
+ int ret;
+ u8 buf_rx[2];
+
+ write16(client, reg, 0);
+ ret = spi_read(client, buf_rx, 2);
+ if (ret < 0)
+ return ret;
+
+ return (buf_rx[0] << 8) | buf_rx[1];
+}
+
+static int read24(void *client, u8 reg)
+{
+ int ret;
+ u8 buf_rx[3];
+
+ write24(client, reg, 0);
+ ret = spi_read(client, buf_rx, 3);
+ if (ret < 0)
+ return ret;
+
+ return (buf_rx[1] << 8) | buf_rx[2];
+}
+
+static const struct ad_dpot_bus_ops bops = {
+ .read_d8 = read8,
+ .read_r8d8 = read16,
+ .read_r8d16 = read24,
+ .write_d8 = write8,
+ .write_r8d8 = write16,
+ .write_r8d16 = write24,
+};
+static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
+{
+ struct ad_dpot_bus_data bdata = {
+ .client = spi,
+ .bops = &bops,
+ };
+
+ return ad_dpot_probe(&spi->dev, &bdata,
+ spi_get_device_id(spi)->driver_data,
+ spi_get_device_id(spi)->name);
+}
+
+static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
+{
+ return ad_dpot_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad_dpot_spi_id[] = {
+ {"ad5160", AD5160_ID},
+ {"ad5161", AD5161_ID},
+ {"ad5162", AD5162_ID},
+ {"ad5165", AD5165_ID},
+ {"ad5200", AD5200_ID},
+ {"ad5201", AD5201_ID},
+ {"ad5203", AD5203_ID},
+ {"ad5204", AD5204_ID},
+ {"ad5206", AD5206_ID},
+ {"ad5207", AD5207_ID},
+ {"ad5231", AD5231_ID},
+ {"ad5232", AD5232_ID},
+ {"ad5233", AD5233_ID},
+ {"ad5235", AD5235_ID},
+ {"ad5260", AD5260_ID},
+ {"ad5262", AD5262_ID},
+ {"ad5263", AD5263_ID},
+ {"ad5290", AD5290_ID},
+ {"ad5291", AD5291_ID},
+ {"ad5292", AD5292_ID},
+ {"ad5293", AD5293_ID},
+ {"ad7376", AD7376_ID},
+ {"ad8400", AD8400_ID},
+ {"ad8402", AD8402_ID},
+ {"ad8403", AD8403_ID},
+ {"adn2850", ADN2850_ID},
+ {"ad5270", AD5270_ID},
+ {"ad5271", AD5271_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
+
+static struct spi_driver ad_dpot_spi_driver = {
+ .driver = {
+ .name = "ad_dpot",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad_dpot_spi_probe,
+ .remove = __devexit_p(ad_dpot_spi_remove),
+ .id_table = ad_dpot_spi_id,
+};
+
+module_spi_driver(ad_dpot_spi_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
new file mode 100644
index 00000000..1d1d4261
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.c
@@ -0,0 +1,771 @@
+/*
+ * ad525x_dpot: Driver for the Analog Devices digital potentiometers
+ * Copyright (c) 2009-2010 Analog Devices, Inc.
+ * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * DEVID #Wipers #Positions Resistor Options (kOhm)
+ * AD5258 1 64 1, 10, 50, 100
+ * AD5259 1 256 5, 10, 50, 100
+ * AD5251 2 64 1, 10, 50, 100
+ * AD5252 2 256 1, 10, 50, 100
+ * AD5255 3 512 25, 250
+ * AD5253 4 64 1, 10, 50, 100
+ * AD5254 4 256 1, 10, 50, 100
+ * AD5160 1 256 5, 10, 50, 100
+ * AD5161 1 256 5, 10, 50, 100
+ * AD5162 2 256 2.5, 10, 50, 100
+ * AD5165 1 256 100
+ * AD5200 1 256 10, 50
+ * AD5201 1 33 10, 50
+ * AD5203 4 64 10, 100
+ * AD5204 4 256 10, 50, 100
+ * AD5206 6 256 10, 50, 100
+ * AD5207 2 256 10, 50, 100
+ * AD5231 1 1024 10, 50, 100
+ * AD5232 2 256 10, 50, 100
+ * AD5233 4 64 10, 50, 100
+ * AD5235 2 1024 25, 250
+ * AD5260 1 256 20, 50, 200
+ * AD5262 2 256 20, 50, 200
+ * AD5263 4 256 20, 50, 200
+ * AD5290 1 256 10, 50, 100
+ * AD5291 1 256 20, 50, 100 (20-TP)
+ * AD5292 1 1024 20, 50, 100 (20-TP)
+ * AD5293 1 1024 20, 50, 100
+ * AD7376 1 128 10, 50, 100, 1M
+ * AD8400 1 256 1, 10, 50, 100
+ * AD8402 2 256 1, 10, 50, 100
+ * AD8403 4 256 1, 10, 50, 100
+ * ADN2850 3 512 25, 250
+ * AD5241 1 256 10, 100, 1M
+ * AD5246 1 128 5, 10, 50, 100
+ * AD5247 1 128 5, 10, 50, 100
+ * AD5245 1 256 5, 10, 50, 100
+ * AD5243 2 256 2.5, 10, 50, 100
+ * AD5248 2 256 2.5, 10, 50, 100
+ * AD5242 2 256 20, 50, 200
+ * AD5280 1 256 20, 50, 200
+ * AD5282 2 256 20, 50, 200
+ * ADN2860 3 512 25, 250
+ * AD5273 1 64 1, 10, 50, 100 (OTP)
+ * AD5171 1 64 5, 10, 50, 100 (OTP)
+ * AD5170 1 256 2.5, 10, 50, 100 (OTP)
+ * AD5172 2 256 2.5, 10, 50, 100 (OTP)
+ * AD5173 2 256 2.5, 10, 50, 100 (OTP)
+ * AD5270 1 1024 20, 50, 100 (50-TP)
+ * AD5271 1 256 20, 50, 100 (50-TP)
+ * AD5272 1 1024 20, 50, 100 (50-TP)
+ * AD5274 1 256 20, 50, 100 (50-TP)
+ *
+ * See Documentation/misc-devices/ad525x_dpot.txt for more info.
+ *
+ * derived from ad5258.c
+ * Copyright (c) 2009 Cyber Switching, Inc.
+ * Author: Chris Verges <chrisv@cyberswitching.com>
+ *
+ * derived from ad5252.c
+ * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "ad525x_dpot.h"
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct dpot_data {
+ struct ad_dpot_bus_data bdata;
+ struct mutex update_lock;
+ unsigned rdac_mask;
+ unsigned max_pos;
+ unsigned long devid;
+ unsigned uid;
+ unsigned feat;
+ unsigned wipers;
+ u16 rdac_cache[MAX_RDACS];
+ DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
+};
+
+static inline int dpot_read_d8(struct dpot_data *dpot)
+{
+ return dpot->bdata.bops->read_d8(dpot->bdata.client);
+}
+
+static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
+{
+ return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
+}
+
+static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
+{
+ return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
+}
+
+static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
+{
+ return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
+}
+
+static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
+{
+ return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
+}
+
+static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
+{
+ return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
+}
+
+static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
+{
+ unsigned ctrl = 0;
+ int value;
+
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+
+ if (dpot->feat & F_RDACS_WONLY)
+ return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID) ||
+ dpot->uid == DPOT_UID(AD5293_ID)) {
+
+ value = dpot_read_r8d8(dpot,
+ DPOT_AD5291_READ_RDAC << 2);
+
+ if (dpot->uid == DPOT_UID(AD5291_ID))
+ value = value >> 2;
+
+ return value;
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+
+ value = dpot_read_r8d8(dpot,
+ DPOT_AD5270_1_2_4_READ_RDAC << 2);
+
+ if (value < 0)
+ return value;
+
+ if (dpot->uid == DPOT_UID(AD5271_ID))
+ value = value >> 2;
+
+ return value;
+ }
+
+ ctrl = DPOT_SPI_READ_RDAC;
+ } else if (reg & DPOT_ADDR_EEPROM) {
+ ctrl = DPOT_SPI_READ_EEPROM;
+ }
+
+ if (dpot->feat & F_SPI_16BIT)
+ return dpot_read_r8d8(dpot, ctrl);
+ else if (dpot->feat & F_SPI_24BIT)
+ return dpot_read_r8d16(dpot, ctrl);
+
+ return -EFAULT;
+}
+
+static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+{
+ int value;
+ unsigned ctrl = 0;
+ switch (dpot->uid) {
+ case DPOT_UID(AD5246_ID):
+ case DPOT_UID(AD5247_ID):
+ return dpot_read_d8(dpot);
+ case DPOT_UID(AD5245_ID):
+ case DPOT_UID(AD5241_ID):
+ case DPOT_UID(AD5242_ID):
+ case DPOT_UID(AD5243_ID):
+ case DPOT_UID(AD5248_ID):
+ case DPOT_UID(AD5280_ID):
+ case DPOT_UID(AD5282_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5282_RDAC_AB;
+ return dpot_read_r8d8(dpot, ctrl);
+ case DPOT_UID(AD5170_ID):
+ case DPOT_UID(AD5171_ID):
+ case DPOT_UID(AD5273_ID):
+ return dpot_read_d8(dpot);
+ case DPOT_UID(AD5172_ID):
+ case DPOT_UID(AD5173_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5172_3_A0;
+ return dpot_read_r8d8(dpot, ctrl);
+ case DPOT_UID(AD5272_ID):
+ case DPOT_UID(AD5274_ID):
+ dpot_write_r8d8(dpot,
+ (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
+
+ value = dpot_read_r8d16(dpot,
+ DPOT_AD5270_1_2_4_RDAC << 2);
+
+ if (value < 0)
+ return value;
+ /*
+ * AD5272/AD5274 returns the high byte first; however, the
+ * underlying smbus expects the low byte first.
+ */
+ value = swab16(value);
+
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
+ return value;
+ default:
+ if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
+ return dpot_read_r8d16(dpot, (reg & 0xF8) |
+ ((reg & 0x7) << 1));
+ else
+ return dpot_read_r8d8(dpot, reg);
+ }
+}
+
+static s32 dpot_read(struct dpot_data *dpot, u8 reg)
+{
+ if (dpot->feat & F_SPI)
+ return dpot_read_spi(dpot, reg);
+ else
+ return dpot_read_i2c(dpot, reg);
+}
+
+static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ unsigned val = 0;
+
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
+ if (dpot->feat & F_RDACS_WONLY)
+ dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
+
+ if (dpot->feat & F_AD_APPDATA) {
+ if (dpot->feat & F_SPI_8BIT) {
+ val = ((reg & DPOT_RDAC_MASK) <<
+ DPOT_MAX_POS(dpot->devid)) |
+ value;
+ return dpot_write_d8(dpot, val);
+ } else if (dpot->feat & F_SPI_16BIT) {
+ val = ((reg & DPOT_RDAC_MASK) <<
+ DPOT_MAX_POS(dpot->devid)) |
+ value;
+ return dpot_write_r8d8(dpot, val >> 8,
+ val & 0xFF);
+ } else
+ BUG();
+ } else {
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID) ||
+ dpot->uid == DPOT_UID(AD5293_ID)) {
+
+ dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
+ DPOT_AD5291_UNLOCK_CMD);
+
+ if (dpot->uid == DPOT_UID(AD5291_ID))
+ value = value << 2;
+
+ return dpot_write_r8d8(dpot,
+ (DPOT_AD5291_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+ dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_CTRLREG << 2,
+ DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+ if (dpot->uid == DPOT_UID(AD5271_ID))
+ value = value << 2;
+
+ return dpot_write_r8d8(dpot,
+ (DPOT_AD5270_1_2_4_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ }
+ val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
+ }
+ } else if (reg & DPOT_ADDR_EEPROM) {
+ val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
+ } else if (reg & DPOT_ADDR_CMD) {
+ switch (reg) {
+ case DPOT_DEC_ALL_6DB:
+ val = DPOT_SPI_DEC_ALL_6DB;
+ break;
+ case DPOT_INC_ALL_6DB:
+ val = DPOT_SPI_INC_ALL_6DB;
+ break;
+ case DPOT_DEC_ALL:
+ val = DPOT_SPI_DEC_ALL;
+ break;
+ case DPOT_INC_ALL:
+ val = DPOT_SPI_INC_ALL;
+ break;
+ }
+ } else if (reg & DPOT_ADDR_OTP) {
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID)) {
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5291_STORE_XTPM << 2, 0);
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+ }
+ } else
+ BUG();
+
+ if (dpot->feat & F_SPI_16BIT)
+ return dpot_write_r8d8(dpot, val, value);
+ else if (dpot->feat & F_SPI_24BIT)
+ return dpot_write_r8d16(dpot, val, value);
+
+ return -EFAULT;
+}
+
+static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ /* Only write the instruction byte for certain commands */
+ unsigned tmp = 0, ctrl = 0;
+
+ switch (dpot->uid) {
+ case DPOT_UID(AD5246_ID):
+ case DPOT_UID(AD5247_ID):
+ return dpot_write_d8(dpot, value);
+ break;
+
+ case DPOT_UID(AD5245_ID):
+ case DPOT_UID(AD5241_ID):
+ case DPOT_UID(AD5242_ID):
+ case DPOT_UID(AD5243_ID):
+ case DPOT_UID(AD5248_ID):
+ case DPOT_UID(AD5280_ID):
+ case DPOT_UID(AD5282_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5282_RDAC_AB;
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5171_ID):
+ case DPOT_UID(AD5273_ID):
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_d8(dpot);
+ if (tmp >> 6) /* Ready to Program? */
+ return -EFAULT;
+ ctrl = DPOT_AD5273_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5172_ID):
+ case DPOT_UID(AD5173_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5172_3_A0;
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_r8d16(dpot, ctrl);
+ if (tmp >> 14) /* Ready to Program? */
+ return -EFAULT;
+ ctrl |= DPOT_AD5170_2_3_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5170_ID):
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_r8d16(dpot, tmp);
+ if (tmp >> 14) /* Ready to Program? */
+ return -EFAULT;
+ ctrl = DPOT_AD5170_2_3_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5272_ID):
+ case DPOT_UID(AD5274_ID):
+ dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
+ DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+ if (reg & DPOT_ADDR_OTP)
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value << 2;
+
+ return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ break;
+ default:
+ if (reg & DPOT_ADDR_CMD)
+ return dpot_write_d8(dpot, reg);
+
+ if (dpot->max_pos > 256)
+ return dpot_write_r8d16(dpot, (reg & 0xF8) |
+ ((reg & 0x7) << 1), value);
+ else
+ /* All other registers require instruction + data bytes */
+ return dpot_write_r8d8(dpot, reg, value);
+ }
+}
+
+static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ if (dpot->feat & F_SPI)
+ return dpot_write_spi(dpot, reg, value);
+ else
+ return dpot_write_i2c(dpot, reg, value);
+}
+
+/* sysfs functions */
+
+static ssize_t sysfs_show_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u32 reg)
+{
+ struct dpot_data *data = dev_get_drvdata(dev);
+ s32 value;
+
+ if (reg & DPOT_ADDR_OTP_EN)
+ return sprintf(buf, "%s\n",
+ test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
+ "enabled" : "disabled");
+
+
+ mutex_lock(&data->update_lock);
+ value = dpot_read(data, reg);
+ mutex_unlock(&data->update_lock);
+
+ if (value < 0)
+ return -EINVAL;
+ /*
+ * Let someone else deal with converting this ...
+ * the tolerance is a two-byte value where the MSB
+ * is a sign + integer value, and the LSB is a
+ * decimal value. See page 18 of the AD5258
+ * datasheet (Rev. A) for more details.
+ */
+
+ if (reg & DPOT_REG_TOL)
+ return sprintf(buf, "0x%04x\n", value & 0xFFFF);
+ else
+ return sprintf(buf, "%u\n", value & data->rdac_mask);
+}
+
+static ssize_t sysfs_set_reg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, u32 reg)
+{
+ struct dpot_data *data = dev_get_drvdata(dev);
+ unsigned long value;
+ int err;
+
+ if (reg & DPOT_ADDR_OTP_EN) {
+ if (!strncmp(buf, "enabled", sizeof("enabled")))
+ set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+ else
+ clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+
+ return count;
+ }
+
+ if ((reg & DPOT_ADDR_OTP) &&
+ !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > data->rdac_mask)
+ value = data->rdac_mask;
+
+ mutex_lock(&data->update_lock);
+ dpot_write(data, reg, value);
+ if (reg & DPOT_ADDR_EEPROM)
+ msleep(26); /* Sleep while the EEPROM updates */
+ else if (reg & DPOT_ADDR_OTP)
+ msleep(400); /* Sleep while the OTP updates */
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t sysfs_do_cmd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, u32 reg)
+{
+ struct dpot_data *data = dev_get_drvdata(dev);
+
+ mutex_lock(&data->update_lock);
+ dpot_write(data, reg, 0);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ------------------------------------------------------------------------- */
+
+#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
+show_##_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sysfs_show_reg(dev, attr, buf, _reg); \
+}
+
+#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
+set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ return sysfs_set_reg(dev, attr, buf, count, _reg); \
+}
+
+#define DPOT_DEVICE_SHOW_SET(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+DPOT_DEVICE_SET(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
+
+#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
+
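+/*
+ * Each DPOT_DEVICE_SHOW_SET(name, reg) expansion below emits a
+ * show_##name()/set_##name() pair plus a DEVICE_ATTR, i.e. one
+ * read/write sysfs file per wiper, EEPROM, OTP, and OTP-enable
+ * register; DPOT_DEVICE_SHOW_ONLY emits show-only tolerance files.
+ */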
+DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
+
+DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
+
+DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
+
+DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
+
+DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
+
+DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
+
+static const struct attribute *dpot_attrib_wipers[] = {
+ &dev_attr_rdac0.attr,
+ &dev_attr_rdac1.attr,
+ &dev_attr_rdac2.attr,
+ &dev_attr_rdac3.attr,
+ &dev_attr_rdac4.attr,
+ &dev_attr_rdac5.attr,
+ NULL
+};
+
+static const struct attribute *dpot_attrib_eeprom[] = {
+ &dev_attr_eeprom0.attr,
+ &dev_attr_eeprom1.attr,
+ &dev_attr_eeprom2.attr,
+ &dev_attr_eeprom3.attr,
+ &dev_attr_eeprom4.attr,
+ &dev_attr_eeprom5.attr,
+ NULL
+};
+
+static const struct attribute *dpot_attrib_otp[] = {
+ &dev_attr_otp0.attr,
+ &dev_attr_otp1.attr,
+ &dev_attr_otp2.attr,
+ &dev_attr_otp3.attr,
+ &dev_attr_otp4.attr,
+ &dev_attr_otp5.attr,
+ NULL
+};
+
+static const struct attribute *dpot_attrib_otp_en[] = {
+ &dev_attr_otp0en.attr,
+ &dev_attr_otp1en.attr,
+ &dev_attr_otp2en.attr,
+ &dev_attr_otp3en.attr,
+ &dev_attr_otp4en.attr,
+ &dev_attr_otp5en.attr,
+ NULL
+};
+
+static const struct attribute *dpot_attrib_tolerance[] = {
+ &dev_attr_tolerance0.attr,
+ &dev_attr_tolerance1.attr,
+ &dev_attr_tolerance2.attr,
+ &dev_attr_tolerance3.attr,
+ &dev_attr_tolerance4.attr,
+ &dev_attr_tolerance5.attr,
+ NULL
+};
+
+/* ------------------------------------------------------------------------- */
+
+#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
+set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
+} \
+static DEVICE_ATTR(_name, S_IWUSR, NULL, set_##_name);
+
+DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
+DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
+DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
+DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
+
+static struct attribute *ad525x_attributes_commands[] = {
+ &dev_attr_inc_all.attr,
+ &dev_attr_dec_all.attr,
+ &dev_attr_inc_all_6db.attr,
+ &dev_attr_dec_all_6db.attr,
+ NULL
+};
+
+static const struct attribute_group ad525x_group_commands = {
+ .attrs = ad525x_attributes_commands,
+};
+
+int __devinit ad_dpot_add_files(struct device *dev,
+ unsigned features, unsigned rdac)
+{
+ int err = sysfs_create_file(&dev->kobj,
+ dpot_attrib_wipers[rdac]);
+ if (features & F_CMD_EEP)
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_eeprom[rdac]);
+ if (features & F_CMD_TOL)
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_tolerance[rdac]);
+ if (features & F_CMD_OTP) {
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_otp_en[rdac]);
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_otp[rdac]);
+ }
+
+ if (err)
+ dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
+ rdac);
+
+ return err;
+}
+
+inline void ad_dpot_remove_files(struct device *dev,
+ unsigned features, unsigned rdac)
+{
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_wipers[rdac]);
+ if (features & F_CMD_EEP)
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_eeprom[rdac]);
+ if (features & F_CMD_TOL)
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_tolerance[rdac]);
+ if (features & F_CMD_OTP) {
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_otp_en[rdac]);
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_otp[rdac]);
+ }
+}
+
+int __devinit ad_dpot_probe(struct device *dev,
+ struct ad_dpot_bus_data *bdata, unsigned long devid,
+ const char *name)
+{
+ struct dpot_data *data;
+ int i, err = 0;
+
+ data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ dev_set_drvdata(dev, data);
+ mutex_init(&data->update_lock);
+
+ data->bdata = *bdata;
+ data->devid = devid;
+
+ data->max_pos = 1 << DPOT_MAX_POS(devid);
+ data->rdac_mask = data->max_pos - 1;
+ data->feat = DPOT_FEAT(devid);
+ data->uid = DPOT_UID(devid);
+ data->wipers = DPOT_WIPERS(devid);
+
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i)) {
+ err = ad_dpot_add_files(dev, data->feat, i);
+ if (err)
+ goto exit_remove_files;
+ /* power-up midscale */
+ if (data->feat & F_RDACS_WONLY)
+ data->rdac_cache[i] = data->max_pos / 2;
+ }
+
+ if (data->feat & F_CMD_INC)
+ err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+
+ if (err) {
+ dev_err(dev, "failed to register sysfs hooks\n");
+ goto exit_remove_files;
+ }
+
+ dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
+ name, data->max_pos);
+
+ return 0;
+
+exit_remove_files:
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i))
+ ad_dpot_remove_files(dev, data->feat, i);
+
+exit_free:
+ kfree(data);
+ dev_set_drvdata(dev, NULL);
+exit:
+ dev_err(dev, "failed to create client for %s ID 0x%lX\n",
+ name, devid);
+ return err;
+}
+EXPORT_SYMBOL(ad_dpot_probe);
+
+int __devexit ad_dpot_remove(struct device *dev)
+{
+ struct dpot_data *data = dev_get_drvdata(dev);
+ int i;
+
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i))
+ ad_dpot_remove_files(dev, data->feat, i);
+
+ kfree(data);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad_dpot_remove);
+
+MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
+ "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Digital potentiometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
new file mode 100644
index 00000000..6bd1eba2
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.h
@@ -0,0 +1,215 @@
+/*
+ * Driver for the Analog Devices digital potentiometers
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD_DPOT_H_
+#define _AD_DPOT_H_
+
+#include <linux/types.h>
+
+#define DPOT_CONF(features, wipers, max_pos, uid) \
+ (((features) << 18) | (((wipers) & 0xFF) << 10) | \
+ (((max_pos) & 0xF) << 6) | ((uid) & 0x3F))
+
+#define DPOT_UID(conf) ((conf) & 0x3F)
+#define DPOT_MAX_POS(conf) (((conf) >> 6) & 0xF)
+#define DPOT_WIPERS(conf) (((conf) >> 10) & 0xFF)
+#define DPOT_FEAT(conf) ((conf) >> 18)
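+
+/*
+ * Worked example of the encoding: for AD5258_ID below,
+ * DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0) packs uid = 0 into bits [5:0],
+ * max_pos = 6 into bits [9:6], the wiper mask BRDAC0 into bits [17:10]
+ * and the feature flags from bit 18 up; DPOT_MAX_POS(AD5258_ID) then
+ * yields 6, i.e. a 1 << 6 = 64-position device (see ad_dpot_probe()).
+ */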
+
+#define BRDAC0 (1 << 0)
+#define BRDAC1 (1 << 1)
+#define BRDAC2 (1 << 2)
+#define BRDAC3 (1 << 3)
+#define BRDAC4 (1 << 4)
+#define BRDAC5 (1 << 5)
+#define MAX_RDACS 6
+
+#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */
+#define F_CMD_EEP (1 << 1) /* Features EEPROM */
+#define F_CMD_OTP (1 << 2) /* Features OTP */
+#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */
+#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */
+#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */
+#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */
+#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */
+#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */
+#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */
+
+#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL)
+#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP)
+#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT)
+
+enum dpot_devid {
+ AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
+ AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
+ AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC1 | BRDAC3, 6, 2),
+ AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC1 | BRDAC3, 8, 3),
+ AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
+ AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5),
+ AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2, 9, 6),
+ AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 7), /* SPI */
+ AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 8),
+ AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 9),
+ AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 10),
+ AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 11),
+ AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 5, 12),
+ AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13),
+ AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14),
+ AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5,
+ 8, 15),
+ AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 16),
+ AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0, 10, 17),
+ AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 18),
+ AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19),
+ AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0 | BRDAC1, 10, 20),
+ AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 21),
+ AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 22),
+ AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
+ AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 24),
+ AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+ BRDAC0, 8, 25),
+ AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+ BRDAC0, 10, 26),
+ AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
+ AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 7, 28),
+ AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0, 8, 29),
+ AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 30),
+ AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2, 8, 31),
+ ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0 | BRDAC1, 10, 32),
+ AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33),
+ AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34),
+ AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35),
+ AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36),
+ AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37),
+ AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38),
+ AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39),
+ AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40),
+ AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41),
+ ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2, 9, 42),
+ AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43),
+ AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44),
+ AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
+ AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
+ AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+ AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+ BRDAC0, 10, 48),
+ AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+ BRDAC0, 8, 49),
+ AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50),
+ AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51),
+};
+
+#define DPOT_RDAC0 0
+#define DPOT_RDAC1 1
+#define DPOT_RDAC2 2
+#define DPOT_RDAC3 3
+#define DPOT_RDAC4 4
+#define DPOT_RDAC5 5
+
+#define DPOT_RDAC_MASK 0x1F
+
+#define DPOT_REG_TOL 0x18
+#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0)
+#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1)
+#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2)
+#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3)
+#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4)
+#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5)
+
+/* RDAC-to-EEPROM Interface Commands */
+#define DPOT_ADDR_RDAC (0x0 << 5)
+#define DPOT_ADDR_EEPROM (0x1 << 5)
+#define DPOT_ADDR_OTP (0x1 << 6)
+#define DPOT_ADDR_CMD (0x1 << 7)
+#define DPOT_ADDR_OTP_EN (0x1 << 9)
+
+#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3))
+#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3))
+#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3))
+#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3))
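+
+/*
+ * Example: DPOT_INC_ALL above is DPOT_ADDR_CMD | (0xB << 3), i.e. the
+ * command address bit combined with a device opcode in bits [6:3]; these
+ * composite values are what the set_inc_all()/set_dec_all() attributes in
+ * ad525x_dpot.c pass to sysfs_do_cmd().
+ */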
+
+#define DPOT_SPI_RDAC 0xB0
+#define DPOT_SPI_EEPROM 0x30
+#define DPOT_SPI_READ_RDAC 0xA0
+#define DPOT_SPI_READ_EEPROM 0x90
+#define DPOT_SPI_DEC_ALL_6DB 0x50
+#define DPOT_SPI_INC_ALL_6DB 0xD0
+#define DPOT_SPI_DEC_ALL 0x70
+#define DPOT_SPI_INC_ALL 0xF0
+
+/* AD5291/2/3 use special commands */
+#define DPOT_AD5291_RDAC 0x01
+#define DPOT_AD5291_READ_RDAC 0x02
+#define DPOT_AD5291_STORE_XTPM 0x03
+#define DPOT_AD5291_CTRLREG 0x06
+#define DPOT_AD5291_UNLOCK_CMD 0x03
+
+/* AD5270/1/2/4 use special commands */
+#define DPOT_AD5270_1_2_4_RDAC 0x01
+#define DPOT_AD5270_1_2_4_READ_RDAC 0x02
+#define DPOT_AD5270_1_2_4_STORE_XTPM 0x03
+#define DPOT_AD5270_1_2_4_CTRLREG 0x07
+#define DPOT_AD5270_1_2_4_UNLOCK_CMD 0x03
+
+#define DPOT_AD5282_RDAC_AB 0x80
+
+#define DPOT_AD5273_FUSE 0x80
+#define DPOT_AD5170_2_3_FUSE 0x20
+#define DPOT_AD5170_2_3_OW 0x08
+#define DPOT_AD5172_3_A0 0x08
+#define DPOT_AD5170_2FUSE 0x80
+
+struct dpot_data;
+
+struct ad_dpot_bus_ops {
+ int (*read_d8) (void *client);
+ int (*read_r8d8) (void *client, u8 reg);
+ int (*read_r8d16) (void *client, u8 reg);
+ int (*write_d8) (void *client, u8 val);
+ int (*write_r8d8) (void *client, u8 reg, u8 val);
+ int (*write_r8d16) (void *client, u8 reg, u16 val);
+};
+
+struct ad_dpot_bus_data {
+ void *client;
+ const struct ad_dpot_bus_ops *bops;
+};
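+
+/*
+ * Illustrative sketch (not part of this interface): an I2C glue driver
+ * can back these ops with the standard SMBus helpers, e.g.
+ *
+ *	static int write_r8d8(void *client, u8 reg, u8 val)
+ *	{
+ *		return i2c_smbus_write_byte_data(client, reg, val);
+ *	}
+ *
+ * and then hand ad_dpot_probe() a struct ad_dpot_bus_data carrying the
+ * client pointer and the ops table.
+ */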
+
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
+ unsigned long devid, const char *name);
+int ad_dpot_remove(struct device *dev);
+
+#endif
diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c
new file mode 100644
index 00000000..830d2897
--- /dev/null
+++ b/drivers/misc/akm8975.c
@@ -0,0 +1,732 @@
+/* drivers/misc/akm8975.c - akm8975 compass driver
+ *
+ * Copyright (C) 2007-2008 HTC Corporation.
+ * Author: Hou-Kun Chen <houkun.chen@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Revised by AKM 2009/04/02
+ * Revised by Motorola 2010/05/27
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <linux/akm8975.h>
+#include <linux/earlysuspend.h>
+
+#define AK8975DRV_CALL_DBG 0
+#if AK8975DRV_CALL_DBG
+#define FUNCDBG(msg) pr_err("%s:%s\n", __func__, msg)
+#else
+#define FUNCDBG(msg)
+#endif
+
+#define AK8975DRV_DATA_DBG 0
+#define MAX_FAILURE_COUNT 10
+
+struct akm8975_data {
+ struct i2c_client *this_client;
+ struct akm8975_platform_data *pdata;
+ struct input_dev *input_dev;
+ struct work_struct work;
+ struct mutex flags_lock;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+};
+
+/*
+ * Because misc devices cannot carry a pointer from driver register to
+ * open, we keep this global. This limits the driver to a single instance.
+ */
+static struct akm8975_data *akmd_data;
+
+static DECLARE_WAIT_QUEUE_HEAD(open_wq);
+
+static atomic_t open_flag;
+
+static short m_flag;
+static short a_flag;
+static short t_flag;
+static short mv_flag;
+
+static short akmd_delay;
+
+static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client,
+ AK8975_REG_CNTL));
+}
+static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 0xff)
+ return -EINVAL;
+ i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val);
+ return count;
+}
+static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store);
+
+static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length)
+{
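+ /*
+ * On entry buf[0] holds the register address to read from; the second,
+ * I2C_M_RD message then reuses the same buffer for the data read back
+ * (a standard register-read transaction).
+ */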
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = akm->this_client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) {
+ pr_err("akm8975_i2c_rxdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
+static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) {
+ pr_err("akm8975_i2c_txdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
+static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf)
+{
+ struct akm8975_data *data = i2c_get_clientdata(akm->this_client);
+
+ FUNCDBG("called");
+
+#if AK8975DRV_DATA_DBG
+ pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = %d\n",
+ rbuf[0], rbuf[1], rbuf[2]);
+ pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]);
+ pr_info("Acceleration: x = %d LSB, y = %d LSB, z = %d LSB\n",
+ rbuf[6], rbuf[7], rbuf[8]);
+ pr_info("Magnetic: x = %d LSB, y = %d LSB, z = %d LSB\n\n",
+ rbuf[9], rbuf[10], rbuf[11]);
+#endif
+ mutex_lock(&akm->flags_lock);
+ /* Report magnetic sensor information */
+ if (m_flag) {
+ input_report_abs(data->input_dev, ABS_RX, rbuf[0]);
+ input_report_abs(data->input_dev, ABS_RY, rbuf[1]);
+ input_report_abs(data->input_dev, ABS_RZ, rbuf[2]);
+ input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]);
+ }
+
+ /* Report acceleration sensor information */
+ if (a_flag) {
+ input_report_abs(data->input_dev, ABS_X, rbuf[6]);
+ input_report_abs(data->input_dev, ABS_Y, rbuf[7]);
+ input_report_abs(data->input_dev, ABS_Z, rbuf[8]);
+ input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]);
+ }
+
+ /* Report temperature information */
+ if (t_flag)
+ input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]);
+
+ if (mv_flag) {
+ input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]);
+ input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]);
+ input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]);
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ input_sync(data->input_dev);
+}
+
+static void akm8975_ecs_close_done(struct akm8975_data *akm)
+{
+ FUNCDBG("called");
+ mutex_lock(&akm->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&akm->flags_lock);
+}
+
+static int akm_aot_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ FUNCDBG("called");
+ /* enforce the single-open semantics that open_flag implies */
+ if (atomic_cmpxchg(&open_flag, 0, 1) != 0)
+ return -EBUSY;
+ wake_up(&open_wq);
+
+ ret = nonseekable_open(inode, file);
+ if (ret) {
+ atomic_set(&open_flag, 0);
+ wake_up(&open_wq);
+ return ret;
+ }
+
+ file->private_data = akmd_data;
+
+ return 0;
+}
+
+static int akm_aot_release(struct inode *inode, struct file *file)
+{
+ FUNCDBG("called");
+ atomic_set(&open_flag, 0);
+ wake_up(&open_wq);
+ return 0;
+}
+
+static int akm_aot_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+ short flag;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ case ECS_IOCTL_APP_SET_AFLAG:
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ if (flag < 0 || flag > 1)
+ return -EINVAL;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ mutex_lock(&akm->flags_lock);
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ m_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MFLAG:
+ flag = m_flag;
+ break;
+ case ECS_IOCTL_APP_SET_AFLAG:
+ a_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_AFLAG:
+ flag = a_flag;
+ break;
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ mv_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ flag = mv_flag;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ akmd_delay = flag;
+ break;
+ case ECS_IOCTL_APP_GET_DELAY:
+ flag = akmd_delay;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_GET_MFLAG:
+ case ECS_IOCTL_APP_GET_AFLAG:
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ case ECS_IOCTL_APP_GET_DELAY:
+ if (copy_to_user(argp, &flag, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int akmd_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+
+ FUNCDBG("called");
+ err = nonseekable_open(inode, file);
+ if (err)
+ return err;
+
+ file->private_data = akmd_data;
+ return 0;
+}
+
+static int akmd_release(struct inode *inode, struct file *file)
+{
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+ akm8975_ecs_close_done(akm);
+ return 0;
+}
+
+static int akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+
+ char rwbuf[16];
+ int ret = -1;
+ int status;
+ short value[12];
+ short delay;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ case ECS_IOCTL_WRITE:
+ if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+
+ case ECS_IOCTL_SET_YPR:
+ if (copy_from_user(&value, argp, sizeof(value)))
+ return -EFAULT;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ /* rwbuf[0] is the transfer length, rwbuf[1..] the payload */
+ if (rwbuf[0] < 1 || rwbuf[0] >= sizeof(rwbuf))
+ return -EINVAL;
+
+ ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case ECS_IOCTL_WRITE:
+ if (rwbuf[0] < 2 || rwbuf[0] >= sizeof(rwbuf))
+ return -EINVAL;
+
+ ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+ case ECS_IOCTL_SET_YPR:
+ akm8975_ecs_report_value(akm, value);
+ break;
+
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) != 0));
+ status = atomic_read(&open_flag);
+ break;
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) == 0));
+ status = atomic_read(&open_flag);
+ break;
+
+ case ECS_IOCTL_GET_DELAY:
+ delay = akmd_delay;
+ break;
+
+ default:
+ FUNCDBG("Unknown cmd\n");
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (copy_to_user(argp, &rwbuf, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ if (copy_to_user(argp, &status, sizeof(status)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_DELAY:
+ if (copy_to_user(argp, &delay, sizeof(delay)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* needed to clear the int. pin */
+static void akm_work_func(struct work_struct *work)
+{
+ struct akm8975_data *akm =
+ container_of(work, struct akm8975_data, work);
+
+ FUNCDBG("called");
+ enable_irq(akm->this_client->irq);
+}
+
+static irqreturn_t akm8975_interrupt(int irq, void *dev_id)
+{
+ struct akm8975_data *akm = dev_id;
+ FUNCDBG("called");
+
+ disable_irq_nosync(akm->this_client->irq);
+ schedule_work(&akm->work);
+ return IRQ_HANDLED;
+}
+
+static int akm8975_power_off(struct akm8975_data *akm)
+{
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_off)
+ akm->pdata->power_off();
+
+ return 0;
+}
+
+static int akm8975_power_on(struct akm8975_data *akm)
+{
+ int err;
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_on) {
+ err = akm->pdata->power_on();
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_off(akm);
+}
+
+static int akm8975_resume(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_on(akm);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void akm8975_early_suspend(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_suspend(akm->this_client, PMSG_SUSPEND);
+}
+
+static void akm8975_early_resume(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_resume(akm->this_client);
+}
+#endif
+
+
+static int akm8975_init_client(struct i2c_client *client)
+{
+ struct akm8975_data *data;
+ int ret;
+
+ data = i2c_get_clientdata(client);
+
+ ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING,
+ "akm8975", data);
+
+ if (ret < 0) {
+ pr_err("akm8975_init_client: request irq failed\n");
+ goto err;
+ }
+
+ init_waitqueue_head(&open_wq);
+
+ mutex_lock(&data->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&data->flags_lock);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct file_operations akmd_fops = {
+ .owner = THIS_MODULE,
+ .open = akmd_open,
+ .release = akmd_release,
+ .ioctl = akmd_ioctl,
+};
+
+static const struct file_operations akm_aot_fops = {
+ .owner = THIS_MODULE,
+ .open = akm_aot_open,
+ .release = akm_aot_release,
+ .ioctl = akm_aot_ioctl,
+};
+
+static struct miscdevice akm_aot_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "akm8975_aot",
+ .fops = &akm_aot_fops,
+};
+
+static struct miscdevice akmd_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "akm8975_dev",
+ .fops = &akmd_fops,
+};
+
+static int akm8975_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct akm8975_data *akm;
+ int err;
+ FUNCDBG("called");
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_platform_data_null;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c functionality check failed. exiting.\n");
+ err = -ENODEV;
+ goto exit_check_functionality_failed;
+ }
+
+ akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL);
+ if (!akm) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ err = -ENOMEM;
+ goto exit_alloc_data_failed;
+ }
+
+ akm->pdata = client->dev.platform_data;
+
+ mutex_init(&akm->flags_lock);
+ INIT_WORK(&akm->work, akm_work_func);
+ i2c_set_clientdata(client, akm);
+
+ err = akm8975_power_on(akm);
+ if (err < 0)
+ goto exit_power_on_failed;
+
+ err = akm8975_init_client(client);
+ if (err)
+ goto exit_init_client_failed;
+ akm->this_client = client;
+ akmd_data = akm;
+
+ akm->input_dev = input_allocate_device();
+ if (!akm->input_dev) {
+ err = -ENOMEM;
+ dev_err(&akm->this_client->dev,
+ "input device allocate failed\n");
+ goto exit_input_dev_alloc_failed;
+ }
+
+ set_bit(EV_ABS, akm->input_dev->evbit);
+
+ /* yaw */
+ input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0);
+ /* pitch */
+ input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0);
+ /* roll */
+ input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0);
+ /* x-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0);
+ /* y-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0);
+ /* z-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0);
+ /* temperature */
+ input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0);
+ /* status of magnetic sensor */
+ input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0);
+ /* status of acceleration sensor */
+ input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0);
+ /* x-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0);
+ /* y-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0);
+ /* z-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0);
+
+ akm->input_dev->name = "compass";
+
+ err = input_register_device(akm->input_dev);
+ if (err) {
+ pr_err("akm8975_probe: Unable to register input device: %s\n",
+ akm->input_dev->name);
+ goto exit_input_register_device_failed;
+ }
+
+ err = misc_register(&akmd_device);
+ if (err) {
+ pr_err("akm8975_probe: akmd_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = misc_register(&akm_aot_device);
+ if (err) {
+ pr_err("akm8975_probe: akm_aot_device register failed\n");
+ misc_deregister(&akmd_device);
+ goto exit_misc_device_register_failed;
+ }
+
+ err = device_create_file(&client->dev, &dev_attr_akm_ms1);
+ if (err)
+ dev_err(&client->dev, "device_create_file failed\n");
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ akm->early_suspend.suspend = akm8975_early_suspend;
+ akm->early_suspend.resume = akm8975_early_resume;
+ register_early_suspend(&akm->early_suspend);
+#endif
+ return 0;
+
+exit_misc_device_register_failed:
+ input_unregister_device(akm->input_dev);
+ goto exit_input_dev_alloc_failed;
+exit_input_register_device_failed:
+ input_free_device(akm->input_dev);
+exit_input_dev_alloc_failed:
+ free_irq(client->irq, akm);
+exit_init_client_failed:
+ akm8975_power_off(akm);
+exit_power_on_failed:
+ kfree(akm);
+exit_alloc_data_failed:
+exit_check_functionality_failed:
+exit_platform_data_null:
+ return err;
+}
+
+static int __devexit akm8975_remove(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+ FUNCDBG("called");
+ free_irq(client->irq, akm);
+ input_unregister_device(akm->input_dev);
+ misc_deregister(&akmd_device);
+ misc_deregister(&akm_aot_device);
+ akm8975_power_off(akm);
+ kfree(akm);
+ return 0;
+}
+
+static const struct i2c_device_id akm8975_id[] = {
+ { "akm8975", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, akm8975_id);
+
+static struct i2c_driver akm8975_driver = {
+ .probe = akm8975_probe,
+ .remove = __devexit_p(akm8975_remove),
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .resume = akm8975_resume,
+ .suspend = akm8975_suspend,
+#endif
+ .id_table = akm8975_id,
+ .driver = {
+ .name = "akm8975",
+ },
+};
+
+static int __init akm8975_init(void)
+{
+ pr_info("AK8975 compass driver: init\n");
+ FUNCDBG("AK8975 compass driver: init\n");
+ return i2c_add_driver(&akm8975_driver);
+}
+
+static void __exit akm8975_exit(void)
+{
+ FUNCDBG("AK8975 compass driver: exit\n");
+ i2c_del_driver(&akm8975_driver);
+}
+
+module_init(akm8975_init);
+module_exit(akm8975_exit);
+
+MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>");
+MODULE_DESCRIPTION("AK8975 compass driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig
new file mode 100644
index 00000000..7f01d8e9
--- /dev/null
+++ b/drivers/misc/altera-stapl/Kconfig
@@ -0,0 +1,8 @@
+comment "Altera FPGA firmware download module"
+
+config ALTERA_STAPL
+ tristate "Altera FPGA firmware download module"
+ depends on I2C
+ default n
+ help
+ A module for downloading firmware into Altera FPGAs by playing
+ Altera STAPL byte-code over JTAG. Say Y when you want to support
+ this tool.
diff --git a/drivers/misc/altera-stapl/Makefile b/drivers/misc/altera-stapl/Makefile
new file mode 100644
index 00000000..055f61ee
--- /dev/null
+++ b/drivers/misc/altera-stapl/Makefile
@@ -0,0 +1,3 @@
+altera-stapl-objs = altera-lpt.o altera-jtag.o altera-comp.o altera.o
+
+obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o
diff --git a/drivers/misc/altera-stapl/altera-comp.c b/drivers/misc/altera-stapl/altera-comp.c
new file mode 100644
index 00000000..49b103be
--- /dev/null
+++ b/drivers/misc/altera-stapl/altera-comp.c
@@ -0,0 +1,142 @@
+/*
+ * altera-comp.c
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010 NetUP Inc.
+ * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include "altera-exprt.h"
+
+#define SHORT_BITS 16
+#define CHAR_BITS 8
+#define DATA_BLOB_LENGTH 3
+#define MATCH_DATA_LENGTH 8192
+#define ALTERA_REQUEST_SIZE 1024
+#define ALTERA_BUFFER_SIZE (MATCH_DATA_LENGTH + ALTERA_REQUEST_SIZE)
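+
+/*
+ * Compressed-stream layout consumed by altera_shrink() below (bits are
+ * packed LSB-first): a 32-bit uncompressed length, then a sequence of
+ * flag bits where 0 announces three literal bytes and 1 announces an
+ * offset/length back-reference into the bytes already produced,
+ * LZ77-style.
+ */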
+
+static u32 altera_bits_req(u32 n)
+{
+ u32 result = SHORT_BITS;
+
+ if (n == 0)
+ result = 1;
+ else {
+ /* Look for the highest non-zero bit position */
+ while ((n & (1 << (SHORT_BITS - 1))) == 0) {
+ n <<= 1;
+ --result;
+ }
+ }
+
+ return result;
+}
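+
+/*
+ * Example: altera_bits_req(5) returns 3, since 5 (binary 101) needs three
+ * bits: the loop shifts n left until its top set bit reaches bit 15,
+ * dropping the 16-bit width by one for every shift taken.
+ */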
+
+static u32 altera_read_packed(u8 *buffer, u32 bits, u32 *bits_avail,
+ u32 *in_index)
+{
+ u32 result = 0;
+ u32 shift = 0;
+ u32 databyte = 0;
+
+ while (bits > 0) {
+ databyte = buffer[*in_index];
+ result |= (((databyte >> (CHAR_BITS - *bits_avail))
+ & (0xff >> (CHAR_BITS - *bits_avail))) << shift);
+
+ if (bits <= *bits_avail) {
+ result &= (0xffff >> (SHORT_BITS - (bits + shift)));
+ *bits_avail -= bits;
+ bits = 0;
+ } else {
+ ++(*in_index);
+ shift += *bits_avail;
+ bits -= *bits_avail;
+ *bits_avail = CHAR_BITS;
+ }
+ }
+
+ return result;
+}
+
+u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version)
+{
+ u32 i, j, data_length = 0L;
+ u32 offset, length;
+ u32 match_data_length = MATCH_DATA_LENGTH;
+ u32 bits_avail = CHAR_BITS;
+ u32 in_index = 0L;
+
+ if (version > 0)
+ --match_data_length;
+
+ for (i = 0; i < out_length; ++i)
+ out[i] = 0;
+
+ /* Read number of bytes in data. */
+ for (i = 0; i < sizeof(in_length); ++i) {
+ data_length = data_length | (
+ altera_read_packed(in,
+ CHAR_BITS,
+ &bits_avail,
+ &in_index) << (i * CHAR_BITS));
+ }
+
+ if (data_length > out_length) {
+ data_length = 0L;
+ return data_length;
+ }
+
+ i = 0;
+ while (i < data_length) {
+ /* A 0 bit indicates literal data. */
+ if (altera_read_packed(in, 1, &bits_avail,
+ &in_index) == 0) {
+ for (j = 0; j < DATA_BLOB_LENGTH; ++j) {
+ if (i < data_length) {
+ out[i] = (u8)altera_read_packed(in,
+ CHAR_BITS,
+ &bits_avail,
+ &in_index);
+ i++;
+ }
+ }
+ } else {
+ /* A 1 bit indicates offset/length to follow. */
+ offset = altera_read_packed(in, altera_bits_req((s16)
+ (i > match_data_length ?
+ match_data_length : i)),
+ &bits_avail,
+ &in_index);
+ length = altera_read_packed(in, CHAR_BITS,
+ &bits_avail,
+ &in_index);
+ for (j = 0; j < length; ++j) {
+ if (i < data_length) {
+ out[i] = out[i - offset];
+ i++;
+ }
+ }
+ }
+ }
+
+ return data_length;
+}
diff --git a/drivers/misc/altera-stapl/altera-exprt.h b/drivers/misc/altera-stapl/altera-exprt.h
new file mode 100644
index 00000000..39c38d84
--- /dev/null
+++ b/drivers/misc/altera-stapl/altera-exprt.h
@@ -0,0 +1,33 @@
+/*
+ * altera-exprt.h
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010 NetUP Inc.
+ * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef ALTERA_EXPRT_H
+#define ALTERA_EXPRT_H
+
+
+u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version);
+int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo);
+
+#endif /* ALTERA_EXPRT_H */
diff --git a/drivers/misc/altera-stapl/altera-jtag.c b/drivers/misc/altera-stapl/altera-jtag.c
new file mode 100644
index 00000000..f4bf2009
--- /dev/null
+++ b/drivers/misc/altera-stapl/altera-jtag.c
@@ -0,0 +1,1021 @@
+/*
+ * altera-jtag.c
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010 NetUP Inc.
+ * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <misc/altera.h>
+#include "altera-exprt.h"
+#include "altera-jtag.h"
+
+#define alt_jtag_io(a, b, c)\
+ astate->config->jtag_io(astate->config->dev, a, b, c)
+
+#define alt_malloc(a) kzalloc(a, GFP_KERNEL)
+
+/*
+ * This structure shows, for each JTAG state, which state is reached after
+ * a single TCK clock cycle with TMS high or TMS low, respectively. This
+ * describes all possible state transitions in the JTAG state machine.
+ */
+struct altera_jtag_machine {
+ enum altera_jtag_state tms_high;
+ enum altera_jtag_state tms_low;
+};
+
+static const struct altera_jtag_machine altera_transitions[] = {
+ /* RESET */ { RESET, IDLE },
+ /* IDLE */ { DRSELECT, IDLE },
+ /* DRSELECT */ { IRSELECT, DRCAPTURE },
+ /* DRCAPTURE */ { DREXIT1, DRSHIFT },
+ /* DRSHIFT */ { DREXIT1, DRSHIFT },
+ /* DREXIT1 */ { DRUPDATE, DRPAUSE },
+ /* DRPAUSE */ { DREXIT2, DRPAUSE },
+ /* DREXIT2 */ { DRUPDATE, DRSHIFT },
+ /* DRUPDATE */ { DRSELECT, IDLE },
+ /* IRSELECT */ { RESET, IRCAPTURE },
+ /* IRCAPTURE */ { IREXIT1, IRSHIFT },
+ /* IRSHIFT */ { IREXIT1, IRSHIFT },
+ /* IREXIT1 */ { IRUPDATE, IRPAUSE },
+ /* IRPAUSE */ { IREXIT2, IRPAUSE },
+ /* IREXIT2 */ { IRUPDATE, IRSHIFT },
+ /* IRUPDATE */ { DRSELECT, IDLE }
+};
+
+/*
+ * This table contains the TMS value to be used to take the NEXT STEP on
+ * the path to the desired state. The array index is the current state,
+ * and the bit position is the desired endstate. To find out which state
+ * is used as the intermediate state, look up the TMS value in the
+ * altera_transitions[] table.
+ */
+static const u16 altera_jtag_path_map[16] = {
+ /* RST RTI SDRS CDR SDR E1DR PDR E2DR */
+ 0x0001, 0xFFFD, 0xFE01, 0xFFE7, 0xFFEF, 0xFF0F, 0xFFBF, 0xFFFF,
+ /* UDR SIRS CIR SIR E1IR PIR E2IR UIR */
+ 0xFEFD, 0x0001, 0xF3FF, 0xF7FF, 0x87FF, 0xDFFF, 0xFFFF, 0x7FFD
+};
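+
+/*
+ * Worked example of the two tables: to move from IDLE (1) to IRSHIFT (11),
+ * bit 11 of altera_jtag_path_map[IDLE] = 0xFFFD is set, so TMS is driven
+ * high and the machine steps to DRSELECT; repeating the lookup walks
+ * DRSELECT -> IRSELECT -> IRCAPTURE -> IRSHIFT in four TCK cycles, which
+ * is exactly the loop altera_goto_jstate() runs below.
+ */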
+
+/* Flag bits for alt_jtag_io() function */
+#define TMS_HIGH 1
+#define TMS_LOW 0
+#define TDI_HIGH 1
+#define TDI_LOW 0
+#define READ_TDO 1
+#define IGNORE_TDO 0
+
+int altera_jinit(struct altera_state *astate)
+{
+ struct altera_jtag *js = &astate->js;
+
+ /* initial JTAG state is unknown */
+ js->jtag_state = ILLEGAL_JTAG_STATE;
+
+ /* initialize to default state */
+ js->drstop_state = IDLE;
+ js->irstop_state = IDLE;
+ js->dr_pre = 0;
+ js->dr_post = 0;
+ js->ir_pre = 0;
+ js->ir_post = 0;
+ js->dr_length = 0;
+ js->ir_length = 0;
+
+ js->dr_pre_data = NULL;
+ js->dr_post_data = NULL;
+ js->ir_pre_data = NULL;
+ js->ir_post_data = NULL;
+ js->dr_buffer = NULL;
+ js->ir_buffer = NULL;
+
+ return 0;
+}
+
+int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state)
+{
+ js->drstop_state = state;
+
+ return 0;
+}
+
+int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state)
+{
+ js->irstop_state = state;
+
+ return 0;
+}
+
+int altera_set_dr_pre(struct altera_jtag *js,
+ u32 count, u32 start_index,
+ u8 *preamble_data)
+{
+ int status = 0;
+ u32 i;
+ u32 j;
+
+ if (count > js->dr_pre) {
+ kfree(js->dr_pre_data);
+ js->dr_pre_data = (u8 *)alt_malloc((count + 7) >> 3);
+ if (js->dr_pre_data == NULL)
+ status = -ENOMEM;
+ else
+ js->dr_pre = count;
+ } else
+ js->dr_pre = count;
+
+ if (status == 0) {
+ for (i = 0; i < count; ++i) {
+ j = i + start_index;
+
+ if (preamble_data == NULL)
+ js->dr_pre_data[i >> 3] |= (1 << (i & 7));
+ else {
+ if (preamble_data[j >> 3] & (1 << (j & 7)))
+ js->dr_pre_data[i >> 3] |=
+ (1 << (i & 7));
+ else
+ js->dr_pre_data[i >> 3] &=
+ ~(u32)(1 << (i & 7));
+
+ }
+ }
+ }
+
+ return status;
+}
+
+int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *preamble_data)
+{
+ int status = 0;
+ u32 i;
+ u32 j;
+
+ if (count > js->ir_pre) {
+ kfree(js->ir_pre_data);
+ js->ir_pre_data = (u8 *)alt_malloc((count + 7) >> 3);
+ if (js->ir_pre_data == NULL)
+ status = -ENOMEM;
+ else
+ js->ir_pre = count;
+
+ } else
+ js->ir_pre = count;
+
+ if (status == 0) {
+ for (i = 0; i < count; ++i) {
+ j = i + start_index;
+ if (preamble_data == NULL)
+ js->ir_pre_data[i >> 3] |= (1 << (i & 7));
+ else {
+ if (preamble_data[j >> 3] & (1 << (j & 7)))
+ js->ir_pre_data[i >> 3] |=
+ (1 << (i & 7));
+ else
+ js->ir_pre_data[i >> 3] &=
+ ~(u32)(1 << (i & 7));
+
+ }
+ }
+ }
+
+ return status;
+}
+
+int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *postamble_data)
+{
+ int status = 0;
+ u32 i;
+ u32 j;
+
+ if (count > js->dr_post) {
+ kfree(js->dr_post_data);
+ js->dr_post_data = (u8 *)alt_malloc((count + 7) >> 3);
+
+ if (js->dr_post_data == NULL)
+ status = -ENOMEM;
+ else
+ js->dr_post = count;
+
+ } else
+ js->dr_post = count;
+
+ if (status == 0) {
+ for (i = 0; i < count; ++i) {
+ j = i + start_index;
+
+ if (postamble_data == NULL)
+ js->dr_post_data[i >> 3] |= (1 << (i & 7));
+ else {
+ if (postamble_data[j >> 3] & (1 << (j & 7)))
+ js->dr_post_data[i >> 3] |=
+ (1 << (i & 7));
+ else
+ js->dr_post_data[i >> 3] &=
+ ~(u32)(1 << (i & 7));
+
+ }
+ }
+ }
+
+ return status;
+}
+
+int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *postamble_data)
+{
+ int status = 0;
+ u32 i;
+ u32 j;
+
+ if (count > js->ir_post) {
+ kfree(js->ir_post_data);
+ js->ir_post_data = (u8 *)alt_malloc((count + 7) >> 3);
+ if (js->ir_post_data == NULL)
+ status = -ENOMEM;
+ else
+ js->ir_post = count;
+
+ } else
+ js->ir_post = count;
+
+ if (status != 0)
+ return status;
+
+ for (i = 0; i < count; ++i) {
+ j = i + start_index;
+
+ if (postamble_data == NULL)
+ js->ir_post_data[i >> 3] |= (1 << (i & 7));
+ else {
+ if (postamble_data[j >> 3] & (1 << (j & 7)))
+ js->ir_post_data[i >> 3] |= (1 << (i & 7));
+ else
+ js->ir_post_data[i >> 3] &=
+ ~(u32)(1 << (i & 7));
+
+ }
+ }
+
+ return status;
+}
+
+static void altera_jreset_idle(struct altera_state *astate)
+{
+ struct altera_jtag *js = &astate->js;
+ int i;
+ /* Go to Test Logic Reset (no matter what the starting state may be) */
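+ /* (five TMS-high clocks are guaranteed to reach RESET from any of the
+ * sixteen TAP states, which is why the loop count is 5) */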
+ for (i = 0; i < 5; ++i)
+ alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO);
+
+ /* Now step to Run Test / Idle */
+ alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO);
+ js->jtag_state = IDLE;
+}
+
+int altera_goto_jstate(struct altera_state *astate,
+ enum altera_jtag_state state)
+{
+ struct altera_jtag *js = &astate->js;
+ int tms;
+ int count = 0;
+ int status = 0;
+
+ if (js->jtag_state == ILLEGAL_JTAG_STATE)
+ /* initialize JTAG chain to known state */
+ altera_jreset_idle(astate);
+
+ if (js->jtag_state == state) {
+ /*
+ * We are already in the desired state.
+ * If it is a stable state, loop here.
+ * Otherwise do nothing (no clock cycles).
+ */
+ if ((state == IDLE) || (state == DRSHIFT) ||
+ (state == DRPAUSE) || (state == IRSHIFT) ||
+ (state == IRPAUSE)) {
+ alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO);
+ } else if (state == RESET)
+ alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO);
+
+ } else {
+ while ((js->jtag_state != state) && (count < 9)) {
+ /* Get TMS value to take a step toward desired state */
+ tms = (altera_jtag_path_map[js->jtag_state] &
+ (1 << state))
+ ? TMS_HIGH : TMS_LOW;
+
+ /* Take a step */
+ alt_jtag_io(tms, TDI_LOW, IGNORE_TDO);
+
+ if (tms)
+ js->jtag_state =
+ altera_transitions[js->jtag_state].tms_high;
+ else
+ js->jtag_state =
+ altera_transitions[js->jtag_state].tms_low;
+
+ ++count;
+ }
+ }
+
+ if (js->jtag_state != state)
+ status = -EREMOTEIO;
+
+ return status;
+}
+
+int altera_wait_cycles(struct altera_state *astate,
+ s32 cycles,
+ enum altera_jtag_state wait_state)
+{
+ struct altera_jtag *js = &astate->js;
+ int tms;
+ s32 count;
+ int status = 0;
+
+ if (js->jtag_state != wait_state)
+ status = altera_goto_jstate(astate, wait_state);
+
+ if (status == 0) {
+ /*
+ * Set TMS high to loop in RESET state
+ * Set TMS low to loop in any other stable state
+ */
+ tms = (wait_state == RESET) ? TMS_HIGH : TMS_LOW;
+
+ for (count = 0L; count < cycles; count++)
+ alt_jtag_io(tms, TDI_LOW, IGNORE_TDO);
+
+ }
+
+ return status;
+}
+
+int altera_wait_msecs(struct altera_state *astate,
+ s32 microseconds, enum altera_jtag_state wait_state)
+/*
+ * Causes JTAG hardware to sit in the specified stable
+ * state for the specified duration of real time (given in
+ * microseconds, despite the function's name). If
+ * no JTAG operations have been performed yet, then only
+ * a delay is performed. This permits the WAIT USECS
+ * statement to be used in VECTOR programs without causing
+ * any JTAG operations.
+ * Returns 0 for success, else appropriate error code.
+ */
+{
+ struct altera_jtag *js = &astate->js;
+ int status = 0;
+
+ if ((js->jtag_state != ILLEGAL_JTAG_STATE) &&
+ (js->jtag_state != wait_state))
+ status = altera_goto_jstate(astate, wait_state);
+
+ if (status == 0)
+ /* Wait for specified time interval */
+ udelay(microseconds);
+
+ return status;
+}
+
+static void altera_concatenate_data(u8 *buffer,
+ u8 *preamble_data,
+ u32 preamble_count,
+ u8 *target_data,
+ u32 start_index,
+ u32 target_count,
+ u8 *postamble_data,
+ u32 postamble_count)
+/*
+ * Copies preamble data, target data, and postamble data
+ * into one buffer for IR or DR scans.
+ */
+{
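+ /*
+ * Resulting buffer, bit-packed LSB-first:
+ * [preamble_count bits][target bits][postamble_count bits]
+ */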
+ u32 i, j, k;
+
+ for (i = 0L; i < preamble_count; ++i) {
+ if (preamble_data[i >> 3L] & (1L << (i & 7L)))
+ buffer[i >> 3L] |= (1L << (i & 7L));
+ else
+ buffer[i >> 3L] &= ~(u32)(1L << (i & 7L));
+
+ }
+
+ j = start_index;
+ k = preamble_count + target_count;
+ for (; i < k; ++i, ++j) {
+ if (target_data[j >> 3L] & (1L << (j & 7L)))
+ buffer[i >> 3L] |= (1L << (i & 7L));
+ else
+ buffer[i >> 3L] &= ~(u32)(1L << (i & 7L));
+
+ }
+
+ j = 0L;
+ k = preamble_count + target_count + postamble_count;
+ for (; i < k; ++i, ++j) {
+ if (postamble_data[j >> 3L] & (1L << (j & 7L)))
+ buffer[i >> 3L] |= (1L << (i & 7L));
+ else
+ buffer[i >> 3L] &= ~(u32)(1L << (i & 7L));
+
+ }
+}
+
+static int alt_jtag_drscan(struct altera_state *astate,
+ int start_state,
+ int count,
+ u8 *tdi,
+ u8 *tdo)
+{
+ int i = 0;
+ int tdo_bit = 0;
+ int status = 1;
+
+ /* First go to DRSHIFT state */
+ switch (start_state) {
+ case 0: /* IDLE */
+ alt_jtag_io(1, 0, 0); /* DRSELECT */
+ alt_jtag_io(0, 0, 0); /* DRCAPTURE */
+ alt_jtag_io(0, 0, 0); /* DRSHIFT */
+ break;
+
+ case 1: /* DRPAUSE */
+ alt_jtag_io(1, 0, 0); /* DREXIT2 */
+ alt_jtag_io(1, 0, 0); /* DRUPDATE */
+ alt_jtag_io(1, 0, 0); /* DRSELECT */
+ alt_jtag_io(0, 0, 0); /* DRCAPTURE */
+ alt_jtag_io(0, 0, 0); /* DRSHIFT */
+ break;
+
+ case 2: /* IRPAUSE */
+ alt_jtag_io(1, 0, 0); /* IREXIT2 */
+ alt_jtag_io(1, 0, 0); /* IRUPDATE */
+ alt_jtag_io(1, 0, 0); /* DRSELECT */
+ alt_jtag_io(0, 0, 0); /* DRCAPTURE */
+ alt_jtag_io(0, 0, 0); /* DRSHIFT */
+ break;
+
+ default:
+ status = 0;
+ }
+
+ if (status) {
+ /* loop in the SHIFT-DR state */
+ for (i = 0; i < count; i++) {
+ tdo_bit = alt_jtag_io(
+ (i == count - 1),
+ tdi[i >> 3] & (1 << (i & 7)),
+ (tdo != NULL));
+
+ if (tdo != NULL) {
+ if (tdo_bit)
+ tdo[i >> 3] |= (1 << (i & 7));
+ else
+ tdo[i >> 3] &= ~(u32)(1 << (i & 7));
+
+ }
+ }
+
+ alt_jtag_io(0, 0, 0); /* DRPAUSE */
+ }
+
+ return status;
+}
+
+static int alt_jtag_irscan(struct altera_state *astate,
+ int start_state,
+ int count,
+ u8 *tdi,
+ u8 *tdo)
+{
+ int i = 0;
+ int tdo_bit = 0;
+ int status = 1;
+
+ /* First go to IRSHIFT state */
+ switch (start_state) {
+ case 0: /* IDLE */
+ alt_jtag_io(1, 0, 0); /* DRSELECT */
+ alt_jtag_io(1, 0, 0); /* IRSELECT */
+ alt_jtag_io(0, 0, 0); /* IRCAPTURE */
+ alt_jtag_io(0, 0, 0); /* IRSHIFT */
+ break;
+
+ case 1: /* DRPAUSE */
+ alt_jtag_io(1, 0, 0); /* DREXIT2 */
+ alt_jtag_io(1, 0, 0); /* DRUPDATE */
+ alt_jtag_io(1, 0, 0); /* DRSELECT */
+ alt_jtag_io(1, 0, 0); /* IRSELECT */
+ alt_jtag_io(0, 0, 0); /* IRCAPTURE */
+ alt_jtag_io(0, 0, 0); /* IRSHIFT */
+ break;
+
+ case 2: /* IRPAUSE */
+ alt_jtag_io(1, 0, 0); /* IREXIT2 */
+ alt_jtag_io(1, 0, 0); /* IRUPDATE */
+ alt_jtag_io(1, 0, 0); /* DRSELECT */
+ alt_jtag_io(1, 0, 0); /* IRSELECT */
+ alt_jtag_io(0, 0, 0); /* IRCAPTURE */
+ alt_jtag_io(0, 0, 0); /* IRSHIFT */
+ break;
+
+ default:
+ status = 0;
+ }
+
+ if (status) {
+ /* loop in the SHIFT-IR state */
+ for (i = 0; i < count; i++) {
+ tdo_bit = alt_jtag_io(
+ (i == count - 1),
+ tdi[i >> 3] & (1 << (i & 7)),
+ (tdo != NULL));
+ if (tdo != NULL) {
+ if (tdo_bit)
+ tdo[i >> 3] |= (1 << (i & 7));
+ else
+ tdo[i >> 3] &= ~(u32)(1 << (i & 7));
+
+ }
+ }
+
+ alt_jtag_io(0, 0, 0); /* IRPAUSE */
+ }
+
+ return status;
+}
+
+static void altera_extract_target_data(u8 *buffer,
+ u8 *target_data,
+ u32 start_index,
+ u32 preamble_count,
+ u32 target_count)
+/*
+ * Copies target data from scan buffer, filtering out
+ * preamble and postamble data.
+ */
+{
+ u32 i;
+ u32 j;
+ u32 k;
+
+ j = preamble_count;
+ k = start_index + target_count;
+ for (i = start_index; i < k; ++i, ++j) {
+ if (buffer[j >> 3] & (1 << (j & 7)))
+ target_data[i >> 3] |= (1 << (i & 7));
+ else
+ target_data[i >> 3] &= ~(u32)(1 << (i & 7));
+
+ }
+}
+
+int altera_irscan(struct altera_state *astate,
+ u32 count,
+ u8 *tdi_data,
+ u32 start_index)
+/* Shifts data into instruction register */
+{
+ struct altera_jtag *js = &astate->js;
+ int start_code = 0;
+ u32 alloc_chars = 0;
+ u32 shift_count = js->ir_pre + count + js->ir_post;
+ int status = 0;
+ enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
+
+ switch (js->jtag_state) {
+ case ILLEGAL_JTAG_STATE:
+ case RESET:
+ case IDLE:
+ start_code = 0;
+ start_state = IDLE;
+ break;
+
+ case DRSELECT:
+ case DRCAPTURE:
+ case DRSHIFT:
+ case DREXIT1:
+ case DRPAUSE:
+ case DREXIT2:
+ case DRUPDATE:
+ start_code = 1;
+ start_state = DRPAUSE;
+ break;
+
+ case IRSELECT:
+ case IRCAPTURE:
+ case IRSHIFT:
+ case IREXIT1:
+ case IRPAUSE:
+ case IREXIT2:
+ case IRUPDATE:
+ start_code = 2;
+ start_state = IRPAUSE;
+ break;
+
+ default:
+ status = -EREMOTEIO;
+ break;
+ }
+
+ if (status == 0)
+ if (js->jtag_state != start_state)
+ status = altera_goto_jstate(astate, start_state);
+
+ if (status == 0) {
+ if (shift_count > js->ir_length) {
+ alloc_chars = (shift_count + 7) >> 3;
+ kfree(js->ir_buffer);
+ js->ir_buffer = (u8 *)alt_malloc(alloc_chars);
+ if (js->ir_buffer == NULL)
+ status = -ENOMEM;
+ else
+ js->ir_length = alloc_chars * 8;
+
+ }
+ }
+
+ if (status == 0) {
+ /*
+ * Copy preamble data, IR data,
+ * and postamble data into a buffer
+ */
+ altera_concatenate_data(js->ir_buffer,
+ js->ir_pre_data,
+ js->ir_pre,
+ tdi_data,
+ start_index,
+ count,
+ js->ir_post_data,
+ js->ir_post);
+ /* Do the IRSCAN */
+ alt_jtag_irscan(astate,
+ start_code,
+ shift_count,
+ js->ir_buffer,
+ NULL);
+
+ /* alt_jtag_irscan() always ends in IRPAUSE state */
+ js->jtag_state = IRPAUSE;
+ }
+
+ if (status == 0)
+ if (js->irstop_state != IRPAUSE)
+ status = altera_goto_jstate(astate, js->irstop_state);
+
+
+ return status;
+}
+
+int altera_swap_ir(struct altera_state *astate,
+ u32 count,
+ u8 *in_data,
+ u32 in_index,
+ u8 *out_data,
+ u32 out_index)
+/* Shifts data into instruction register, capturing output data */
+{
+ struct altera_jtag *js = &astate->js;
+ int start_code = 0;
+ u32 alloc_chars = 0;
+ u32 shift_count = js->ir_pre + count + js->ir_post;
+ int status = 0;
+ enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
+
+ switch (js->jtag_state) {
+ case ILLEGAL_JTAG_STATE:
+ case RESET:
+ case IDLE:
+ start_code = 0;
+ start_state = IDLE;
+ break;
+
+ case DRSELECT:
+ case DRCAPTURE:
+ case DRSHIFT:
+ case DREXIT1:
+ case DRPAUSE:
+ case DREXIT2:
+ case DRUPDATE:
+ start_code = 1;
+ start_state = DRPAUSE;
+ break;
+
+ case IRSELECT:
+ case IRCAPTURE:
+ case IRSHIFT:
+ case IREXIT1:
+ case IRPAUSE:
+ case IREXIT2:
+ case IRUPDATE:
+ start_code = 2;
+ start_state = IRPAUSE;
+ break;
+
+ default:
+ status = -EREMOTEIO;
+ break;
+ }
+
+ if (status == 0)
+ if (js->jtag_state != start_state)
+ status = altera_goto_jstate(astate, start_state);
+
+ if (status == 0) {
+ if (shift_count > js->ir_length) {
+ alloc_chars = (shift_count + 7) >> 3;
+ kfree(js->ir_buffer);
+ js->ir_buffer = (u8 *)alt_malloc(alloc_chars);
+ if (js->ir_buffer == NULL)
+ status = -ENOMEM;
+ else
+ js->ir_length = alloc_chars * 8;
+
+ }
+ }
+
+ if (status == 0) {
+ /*
+ * Copy preamble data, IR data,
+ * and postamble data into a buffer
+ */
+ altera_concatenate_data(js->ir_buffer,
+ js->ir_pre_data,
+ js->ir_pre,
+ in_data,
+ in_index,
+ count,
+ js->ir_post_data,
+ js->ir_post);
+
+ /* Do the IRSCAN */
+ alt_jtag_irscan(astate,
+ start_code,
+ shift_count,
+ js->ir_buffer,
+ js->ir_buffer);
+
+ /* alt_jtag_irscan() always ends in IRPAUSE state */
+ js->jtag_state = IRPAUSE;
+ }
+
+ if (status == 0)
+ if (js->irstop_state != IRPAUSE)
+ status = altera_goto_jstate(astate, js->irstop_state);
+
+
+ if (status == 0)
+ /* Now extract the returned data from the buffer */
+ altera_extract_target_data(js->ir_buffer,
+ out_data, out_index,
+ js->ir_pre, count);
+
+ return status;
+}
+
+int altera_drscan(struct altera_state *astate,
+ u32 count,
+ u8 *tdi_data,
+ u32 start_index)
+/* Shifts data into data register (ignoring output data) */
+{
+ struct altera_jtag *js = &astate->js;
+ int start_code = 0;
+ u32 alloc_chars = 0;
+ u32 shift_count = js->dr_pre + count + js->dr_post;
+ int status = 0;
+ enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
+
+ switch (js->jtag_state) {
+ case ILLEGAL_JTAG_STATE:
+ case RESET:
+ case IDLE:
+ start_code = 0;
+ start_state = IDLE;
+ break;
+
+ case DRSELECT:
+ case DRCAPTURE:
+ case DRSHIFT:
+ case DREXIT1:
+ case DRPAUSE:
+ case DREXIT2:
+ case DRUPDATE:
+ start_code = 1;
+ start_state = DRPAUSE;
+ break;
+
+ case IRSELECT:
+ case IRCAPTURE:
+ case IRSHIFT:
+ case IREXIT1:
+ case IRPAUSE:
+ case IREXIT2:
+ case IRUPDATE:
+ start_code = 2;
+ start_state = IRPAUSE;
+ break;
+
+ default:
+ status = -EREMOTEIO;
+ break;
+ }
+
+ if (status == 0)
+ if (js->jtag_state != start_state)
+ status = altera_goto_jstate(astate, start_state);
+
+ if (status == 0) {
+ if (shift_count > js->dr_length) {
+ alloc_chars = (shift_count + 7) >> 3;
+ kfree(js->dr_buffer);
+ js->dr_buffer = (u8 *)alt_malloc(alloc_chars);
+ if (js->dr_buffer == NULL)
+ status = -ENOMEM;
+ else
+ js->dr_length = alloc_chars * 8;
+
+ }
+ }
+
+ if (status == 0) {
+ /*
+ * Copy preamble data, DR data,
+ * and postamble data into a buffer
+ */
+ altera_concatenate_data(js->dr_buffer,
+ js->dr_pre_data,
+ js->dr_pre,
+ tdi_data,
+ start_index,
+ count,
+ js->dr_post_data,
+ js->dr_post);
+ /* Do the DRSCAN */
+ alt_jtag_drscan(astate, start_code, shift_count,
+ js->dr_buffer, NULL);
+ /* alt_jtag_drscan() always ends in DRPAUSE state */
+ js->jtag_state = DRPAUSE;
+ }
+
+ if (status == 0)
+ if (js->drstop_state != DRPAUSE)
+ status = altera_goto_jstate(astate, js->drstop_state);
+
+ return status;
+}
+
+int altera_swap_dr(struct altera_state *astate, u32 count,
+ u8 *in_data, u32 in_index,
+ u8 *out_data, u32 out_index)
+/* Shifts data into data register, capturing output data */
+{
+ struct altera_jtag *js = &astate->js;
+ int start_code = 0;
+ u32 alloc_chars = 0;
+ u32 shift_count = js->dr_pre + count + js->dr_post;
+ int status = 0;
+ enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
+
+ switch (js->jtag_state) {
+ case ILLEGAL_JTAG_STATE:
+ case RESET:
+ case IDLE:
+ start_code = 0;
+ start_state = IDLE;
+ break;
+
+ case DRSELECT:
+ case DRCAPTURE:
+ case DRSHIFT:
+ case DREXIT1:
+ case DRPAUSE:
+ case DREXIT2:
+ case DRUPDATE:
+ start_code = 1;
+ start_state = DRPAUSE;
+ break;
+
+ case IRSELECT:
+ case IRCAPTURE:
+ case IRSHIFT:
+ case IREXIT1:
+ case IRPAUSE:
+ case IREXIT2:
+ case IRUPDATE:
+ start_code = 2;
+ start_state = IRPAUSE;
+ break;
+
+ default:
+ status = -EREMOTEIO;
+ break;
+ }
+
+ if (status == 0)
+ if (js->jtag_state != start_state)
+ status = altera_goto_jstate(astate, start_state);
+
+ if (status == 0) {
+ if (shift_count > js->dr_length) {
+ alloc_chars = (shift_count + 7) >> 3;
+ kfree(js->dr_buffer);
+ js->dr_buffer = (u8 *)alt_malloc(alloc_chars);
+
+ if (js->dr_buffer == NULL)
+ status = -ENOMEM;
+ else
+ js->dr_length = alloc_chars * 8;
+
+ }
+ }
+
+ if (status == 0) {
+ /*
+ * Copy preamble data, DR data,
+ * and postamble data into a buffer
+ */
+ altera_concatenate_data(js->dr_buffer,
+ js->dr_pre_data,
+ js->dr_pre,
+ in_data,
+ in_index,
+ count,
+ js->dr_post_data,
+ js->dr_post);
+
+ /* Do the DRSCAN */
+ alt_jtag_drscan(astate,
+ start_code,
+ shift_count,
+ js->dr_buffer,
+ js->dr_buffer);
+
+ /* alt_jtag_drscan() always ends in DRPAUSE state */
+ js->jtag_state = DRPAUSE;
+ }
+
+ if (status == 0)
+ if (js->drstop_state != DRPAUSE)
+ status = altera_goto_jstate(astate, js->drstop_state);
+
+ if (status == 0)
+ /* Now extract the returned data from the buffer */
+ altera_extract_target_data(js->dr_buffer,
+ out_data,
+ out_index,
+ js->dr_pre,
+ count);
+
+ return status;
+}
+
+void altera_free_buffers(struct altera_state *astate)
+{
+ struct altera_jtag *js = &astate->js;
+ /* If the JTAG interface was used, reset it to TLR */
+ if (js->jtag_state != ILLEGAL_JTAG_STATE)
+ altera_jreset_idle(astate);
+
+ kfree(js->dr_pre_data);
+ js->dr_pre_data = NULL;
+
+ kfree(js->dr_post_data);
+ js->dr_post_data = NULL;
+
+ kfree(js->dr_buffer);
+ js->dr_buffer = NULL;
+
+ kfree(js->ir_pre_data);
+ js->ir_pre_data = NULL;
+
+ kfree(js->ir_post_data);
+ js->ir_post_data = NULL;
+
+ kfree(js->ir_buffer);
+ js->ir_buffer = NULL;
+}
diff --git a/drivers/misc/altera-stapl/altera-jtag.h b/drivers/misc/altera-stapl/altera-jtag.h
new file mode 100644
index 00000000..2f97e36a
--- /dev/null
+++ b/drivers/misc/altera-stapl/altera-jtag.h
@@ -0,0 +1,113 @@
+/*
+ * altera-jtag.h
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010 NetUP Inc.
+ * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef ALTERA_JTAG_H
+#define ALTERA_JTAG_H
+
+/* JTAG TAP controller states */
+enum altera_jtag_state {
+ ILLEGAL_JTAG_STATE = -1,
+ RESET = 0,
+ IDLE = 1,
+ DRSELECT = 2,
+ DRCAPTURE = 3,
+ DRSHIFT = 4,
+ DREXIT1 = 5,
+ DRPAUSE = 6,
+ DREXIT2 = 7,
+ DRUPDATE = 8,
+ IRSELECT = 9,
+ IRCAPTURE = 10,
+ IRSHIFT = 11,
+ IREXIT1 = 12,
+ IRPAUSE = 13,
+ IREXIT2 = 14,
+	IRUPDATE = 15
+};
+
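+/*
+ * The *_pre/*_post counts are padding bits shifted before and after the
+ * target data (so other devices in the JTAG chain are bypassed), with
+ * the padding bit patterns kept in *_pre_data/*_post_data and assembled
+ * into dr_buffer/ir_buffer for each scan.
+ */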
+struct altera_jtag {
+ /* Global variable to store the current JTAG state */
+ enum altera_jtag_state jtag_state;
+
+ /* Store current stop-state for DR and IR scan commands */
+ enum altera_jtag_state drstop_state;
+ enum altera_jtag_state irstop_state;
+
+ /* Store current padding values */
+ u32 dr_pre;
+ u32 dr_post;
+ u32 ir_pre;
+ u32 ir_post;
+ u32 dr_length;
+ u32 ir_length;
+ u8 *dr_pre_data;
+ u8 *dr_post_data;
+ u8 *ir_pre_data;
+ u8 *ir_post_data;
+ u8 *dr_buffer;
+ u8 *ir_buffer;
+};
+
+#define ALTERA_STACK_SIZE 128
+#define ALTERA_MESSAGE_LENGTH 1024
+
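+/*
+ * Interpreter context: "stack" is the operand stack of the byte-code
+ * interpreter (bounds-checked against ALTERA_STACK_SIZE) and "msg_buff"
+ * accumulates STAPL PRINT output until it is flushed.
+ */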
+struct altera_state {
+ struct altera_config *config;
+ struct altera_jtag js;
+ char msg_buff[ALTERA_MESSAGE_LENGTH + 1];
+ long stack[ALTERA_STACK_SIZE];
+};
+
+int altera_jinit(struct altera_state *astate);
+int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state);
+int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state);
+int altera_set_dr_pre(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *preamble_data);
+int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *preamble_data);
+int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *postamble_data);
+int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index,
+ u8 *postamble_data);
+int altera_goto_jstate(struct altera_state *astate,
+ enum altera_jtag_state state);
+int altera_wait_cycles(struct altera_state *astate, s32 cycles,
+ enum altera_jtag_state wait_state);
+int altera_wait_msecs(struct altera_state *astate, s32 microseconds,
+ enum altera_jtag_state wait_state);
+int altera_irscan(struct altera_state *astate, u32 count,
+ u8 *tdi_data, u32 start_index);
+int altera_swap_ir(struct altera_state *astate,
+ u32 count, u8 *in_data,
+ u32 in_index, u8 *out_data,
+ u32 out_index);
+int altera_drscan(struct altera_state *astate, u32 count,
+ u8 *tdi_data, u32 start_index);
+int altera_swap_dr(struct altera_state *astate, u32 count,
+ u8 *in_data, u32 in_index,
+ u8 *out_data, u32 out_index);
+void altera_free_buffers(struct altera_state *astate);
+#endif /* ALTERA_JTAG_H */
diff --git a/drivers/misc/altera-stapl/altera-lpt.c b/drivers/misc/altera-stapl/altera-lpt.c
new file mode 100644
index 00000000..91456a03
--- /dev/null
+++ b/drivers/misc/altera-stapl/altera-lpt.c
@@ -0,0 +1,70 @@
+/*
+ * altera-lpt.c
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010 NetUP Inc.
+ * Copyright (C) 2010 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include "altera-exprt.h"
+
+static int lpt_hardware_initialized;
+
+static void byteblaster_write(int port, int data)
+{
+ outb((u8)data, (u16)(port + 0x378));
+}
+
+static int byteblaster_read(int port)
+{
+ int data = 0;
+ data = inb((u16)(port + 0x378));
+ return data & 0xff;
+}
+
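+/*
+ * Bit-bang one JTAG clock cycle through a ByteBlaster-style cable on
+ * the first parallel port (I/O base 0x378; offsets 0/1/2 are the
+ * standard data/status/control registers).  Data register bit 6
+ * drives TDI, bit 1 drives TMS and bit 0 is pulsed to generate TCK;
+ * TDO is sampled, inverted, from status register bit 7 before the
+ * rising edge of TCK.
+ */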
+int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo)
+{
+ int data = 0;
+ int tdo = 0;
+ int initial_lpt_ctrl = 0;
+
+ if (!lpt_hardware_initialized) {
+ initial_lpt_ctrl = byteblaster_read(2);
+ byteblaster_write(2, (initial_lpt_ctrl | 0x02) & 0xdf);
+ lpt_hardware_initialized = 1;
+ }
+
+ data = ((tdi ? 0x40 : 0) | (tms ? 0x02 : 0));
+
+ byteblaster_write(0, data);
+
+ if (read_tdo) {
+ tdo = byteblaster_read(1);
+ tdo = ((tdo & 0x80) ? 0 : 1);
+ }
+
+ byteblaster_write(0, data | 0x01);
+
+ byteblaster_write(0, data);
+
+ return tdo;
+}
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
new file mode 100644
index 00000000..24272e02
--- /dev/null
+++ b/drivers/misc/altera-stapl/altera.c
@@ -0,0 +1,2537 @@
+/*
+ * altera.c
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010,2011 NetUP Inc.
+ * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <misc/altera.h>
+#include "altera-exprt.h"
+#include "altera-jtag.h"
+
+static int debug = 1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debugging information");
+
+MODULE_DESCRIPTION("altera FPGA kernel module");
+MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(args...) \
+	do { \
+		if (debug) \
+			printk(KERN_DEBUG args); \
+	} while (0)
+
+enum altera_fpga_opcode {
+ OP_NOP = 0,
+ OP_DUP,
+ OP_SWP,
+ OP_ADD,
+ OP_SUB,
+ OP_MULT,
+ OP_DIV,
+ OP_MOD,
+ OP_SHL,
+ OP_SHR,
+ OP_NOT,
+ OP_AND,
+ OP_OR,
+ OP_XOR,
+ OP_INV,
+ OP_GT,
+ OP_LT,
+ OP_RET,
+ OP_CMPS,
+ OP_PINT,
+ OP_PRNT,
+ OP_DSS,
+ OP_DSSC,
+ OP_ISS,
+ OP_ISSC,
+ OP_DPR = 0x1c,
+ OP_DPRL,
+ OP_DPO,
+ OP_DPOL,
+ OP_IPR,
+ OP_IPRL,
+ OP_IPO,
+ OP_IPOL,
+ OP_PCHR,
+ OP_EXIT,
+ OP_EQU,
+ OP_POPT,
+ OP_ABS = 0x2c,
+ OP_BCH0,
+ OP_PSH0 = 0x2f,
+ OP_PSHL = 0x40,
+ OP_PSHV,
+ OP_JMP,
+ OP_CALL,
+ OP_NEXT,
+ OP_PSTR,
+ OP_SINT = 0x47,
+ OP_ST,
+ OP_ISTP,
+ OP_DSTP,
+ OP_SWPN,
+ OP_DUPN,
+ OP_POPV,
+ OP_POPE,
+ OP_POPA,
+ OP_JMPZ,
+ OP_DS,
+ OP_IS,
+ OP_DPRA,
+ OP_DPOA,
+ OP_IPRA,
+ OP_IPOA,
+ OP_EXPT,
+ OP_PSHE,
+ OP_PSHA,
+ OP_DYNA,
+ OP_EXPV = 0x5c,
+ OP_COPY = 0x80,
+ OP_REVA,
+ OP_DSC,
+ OP_ISC,
+ OP_WAIT,
+ OP_VS,
+ OP_CMPA = 0xc0,
+ OP_VSC,
+};
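+
+/*
+ * The top two bits of an opcode give the number of 32-bit big-endian
+ * arguments that follow it inline in the code stream (0x00-0x3f: none,
+ * 0x40-0x7f: one, 0x80-0xbf: two, 0xc0-0xff: three); see the
+ * "(opcode >> 6) & 3" decode in altera_execute().
+ */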
+
+struct altera_procinfo {
+ char *name;
+ u8 attrs;
+ struct altera_procinfo *next;
+};
+
+/* This function checks if enough parameters are available on the stack. */
+static int altera_check_stack(int stack_ptr, int count, int *status)
+{
+ if (stack_ptr < count) {
+ *status = -EOVERFLOW;
+ return 0;
+ }
+
+ return 1;
+}
+
+static void altera_export_int(char *key, s32 value)
+{
+ dprintk("Export: key = \"%s\", value = %d\n", key, value);
+}
+
+#define HEX_LINE_CHARS 72
+#define HEX_LINE_BITS (HEX_LINE_CHARS * 4)
+
+static void altera_export_bool_array(char *key, u8 *data, s32 count)
+{
+ char string[HEX_LINE_CHARS + 1];
+ s32 i, offset;
+ u32 size, line, lines, linebits, value, j, k;
+
+ if (count > HEX_LINE_BITS) {
+ dprintk("Export: key = \"%s\", %d bits, value = HEX\n",
+ key, count);
+ lines = (count + (HEX_LINE_BITS - 1)) / HEX_LINE_BITS;
+
+ for (line = 0; line < lines; ++line) {
+ if (line < (lines - 1)) {
+ linebits = HEX_LINE_BITS;
+ size = HEX_LINE_CHARS;
+ offset = count - ((line + 1) * HEX_LINE_BITS);
+ } else {
+ linebits =
+ count - ((lines - 1) * HEX_LINE_BITS);
+ size = (linebits + 3) / 4;
+ offset = 0L;
+ }
+
+ string[size] = '\0';
+ j = size - 1;
+ value = 0;
+
+ for (k = 0; k < linebits; ++k) {
+ i = k + offset;
+ if (data[i >> 3] & (1 << (i & 7)))
+ value |= (1 << (i & 3));
+ if ((i & 3) == 3) {
+ sprintf(&string[j], "%1x", value);
+ value = 0;
+ --j;
+ }
+ }
+ if ((k & 3) > 0)
+ sprintf(&string[j], "%1x", value);
+
+ dprintk("%s\n", string);
+ }
+
+ } else {
+ size = (count + 3) / 4;
+ string[size] = '\0';
+ j = size - 1;
+ value = 0;
+
+ for (i = 0; i < count; ++i) {
+ if (data[i >> 3] & (1 << (i & 7)))
+ value |= (1 << (i & 3));
+ if ((i & 3) == 3) {
+ sprintf(&string[j], "%1x", value);
+ value = 0;
+ --j;
+ }
+ }
+ if ((i & 3) > 0)
+ sprintf(&string[j], "%1x", value);
+
+ dprintk("Export: key = \"%s\", %d bits, value = HEX %s\n",
+ key, count, string);
+ }
+}
+
+static int altera_execute(struct altera_state *astate,
+ u8 *p,
+ s32 program_size,
+ s32 *error_address,
+ int *exit_code,
+ int *format_version)
+{
+ struct altera_config *aconf = astate->config;
+ char *msg_buff = astate->msg_buff;
+ long *stack = astate->stack;
+ int status = 0;
+ u32 first_word = 0L;
+ u32 action_table = 0L;
+ u32 proc_table = 0L;
+ u32 str_table = 0L;
+ u32 sym_table = 0L;
+ u32 data_sect = 0L;
+ u32 code_sect = 0L;
+ u32 debug_sect = 0L;
+ u32 action_count = 0L;
+ u32 proc_count = 0L;
+ u32 sym_count = 0L;
+ long *vars = NULL;
+ s32 *var_size = NULL;
+ char *attrs = NULL;
+ u8 *proc_attributes = NULL;
+ u32 pc;
+ u32 opcode_address;
+ u32 args[3];
+ u32 opcode;
+ u32 name_id;
+ u8 charbuf[4];
+ long long_tmp;
+ u32 variable_id;
+ u8 *charptr_tmp;
+ u8 *charptr_tmp2;
+ long *longptr_tmp;
+ int version = 0;
+ int delta = 0;
+ int stack_ptr = 0;
+ u32 arg_count;
+ int done = 0;
+ int bad_opcode = 0;
+ u32 count;
+ u32 index;
+ u32 index2;
+ s32 long_count;
+ s32 long_idx;
+ s32 long_idx2;
+ u32 i;
+ u32 j;
+ u32 uncomp_size;
+ u32 offset;
+ u32 value;
+ int current_proc = 0;
+ int reverse;
+
+ char *name;
+
+ dprintk("%s\n", __func__);
+
+ /* Read header information */
+ if (program_size > 52L) {
+ first_word = get_unaligned_be32(&p[0]);
+ version = (first_word & 1L);
+ *format_version = version + 1;
+ delta = version * 8;
+
+ action_table = get_unaligned_be32(&p[4]);
+ proc_table = get_unaligned_be32(&p[8]);
+ str_table = get_unaligned_be32(&p[4 + delta]);
+ sym_table = get_unaligned_be32(&p[16 + delta]);
+ data_sect = get_unaligned_be32(&p[20 + delta]);
+ code_sect = get_unaligned_be32(&p[24 + delta]);
+ debug_sect = get_unaligned_be32(&p[28 + delta]);
+ action_count = get_unaligned_be32(&p[40 + delta]);
+ proc_count = get_unaligned_be32(&p[44 + delta]);
+ sym_count = get_unaligned_be32(&p[48 + (2 * delta)]);
+ }
+
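+	/* 0x4a 0x41 0x4d is ASCII "JAM"; the low byte is the version */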
+ if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) {
+ done = 1;
+ status = -EIO;
+ goto exit_done;
+ }
+
+ if (sym_count <= 0)
+ goto exit_done;
+
+	vars = kcalloc(sym_count, sizeof(long), GFP_KERNEL);
+
+ if (vars == NULL)
+ status = -ENOMEM;
+
+ if (status == 0) {
+		var_size = kcalloc(sym_count, sizeof(s32), GFP_KERNEL);
+
+ if (var_size == NULL)
+ status = -ENOMEM;
+ }
+
+ if (status == 0) {
+ attrs = kzalloc(sym_count, GFP_KERNEL);
+
+ if (attrs == NULL)
+ status = -ENOMEM;
+ }
+
+ if ((status == 0) && (version > 0)) {
+ proc_attributes = kzalloc(proc_count, GFP_KERNEL);
+
+ if (proc_attributes == NULL)
+ status = -ENOMEM;
+ }
+
+ if (status != 0)
+ goto exit_done;
+
+ delta = version * 2;
+
+ for (i = 0; i < sym_count; ++i) {
+ offset = (sym_table + ((11 + delta) * i));
+
+ value = get_unaligned_be32(&p[offset + 3 + delta]);
+
+ attrs[i] = p[offset];
+
+ /*
+ * use bit 7 of attribute byte to indicate that
+ * this buffer was dynamically allocated
+ * and should be freed later
+ */
+ attrs[i] &= 0x7f;
+
+ var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]);
+
+ /*
+ * Attribute bits:
+ * bit 0: 0 = read-only, 1 = read-write
+ * bit 1: 0 = not compressed, 1 = compressed
+ * bit 2: 0 = not initialized, 1 = initialized
+ * bit 3: 0 = scalar, 1 = array
+ * bit 4: 0 = Boolean, 1 = integer
+ * bit 5: 0 = declared variable,
+ * 1 = compiler created temporary variable
+ */
+
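+		/*
+		 * The mask tests below combine these bits: e.g.
+		 * (attrs & 0x1e) == 0x0e selects a compressed, initialized
+		 * Boolean array and (attrs & 0x0c) == 0x08 any
+		 * uninitialized array.
+		 */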
+ if ((attrs[i] & 0x0c) == 0x04)
+ /* initialized scalar variable */
+ vars[i] = value;
+ else if ((attrs[i] & 0x1e) == 0x0e) {
+ /* initialized compressed Boolean array */
+ uncomp_size = get_unaligned_le32(&p[data_sect + value]);
+
+ /* allocate a buffer for the uncompressed data */
+ vars[i] = (long)kzalloc(uncomp_size, GFP_KERNEL);
+ if (vars[i] == 0L)
+ status = -ENOMEM;
+ else {
+ /* set flag so buffer will be freed later */
+ attrs[i] |= 0x80;
+
+ /* uncompress the data */
+ if (altera_shrink(&p[data_sect + value],
+ var_size[i],
+ (u8 *)vars[i],
+ uncomp_size,
+ version) != uncomp_size)
+ /* decompression failed */
+ status = -EIO;
+ else
+ var_size[i] = uncomp_size * 8L;
+
+ }
+ } else if ((attrs[i] & 0x1e) == 0x0c) {
+ /* initialized Boolean array */
+ vars[i] = value + data_sect + (long)p;
+ } else if ((attrs[i] & 0x1c) == 0x1c) {
+ /* initialized integer array */
+ vars[i] = value + data_sect;
+ } else if ((attrs[i] & 0x0c) == 0x08) {
+ /* uninitialized array */
+
+ /* flag attrs so that memory is freed */
+ attrs[i] |= 0x80;
+
+ if (var_size[i] > 0) {
+ u32 size;
+
+ if (attrs[i] & 0x10)
+ /* integer array */
+ size = (var_size[i] * sizeof(s32));
+ else
+ /* Boolean array */
+ size = ((var_size[i] + 7L) / 8L);
+
+ vars[i] = (long)kzalloc(size, GFP_KERNEL);
+
+ if (vars[i] == 0) {
+ status = -ENOMEM;
+ } else {
+ /* zero out memory */
+ for (j = 0; j < size; ++j)
+ ((u8 *)(vars[i]))[j] = 0;
+
+ }
+ } else
+ vars[i] = 0;
+
+ } else
+ vars[i] = 0;
+
+ }
+
+exit_done:
+ if (status != 0)
+ done = 1;
+
+ altera_jinit(astate);
+
+ pc = code_sect;
+ msg_buff[0] = '\0';
+
+ /*
+ * For JBC version 2, we will execute the procedures corresponding to
+ * the selected ACTION
+ */
+ if (version > 0) {
+ if (aconf->action == NULL) {
+ status = -EINVAL;
+ done = 1;
+ } else {
+ int action_found = 0;
+ for (i = 0; (i < action_count) && !action_found; ++i) {
+ name_id = get_unaligned_be32(&p[action_table +
+ (12 * i)]);
+
+ name = &p[str_table + name_id];
+
+ if (strnicmp(aconf->action, name, strlen(name)) == 0) {
+ action_found = 1;
+ current_proc =
+ get_unaligned_be32(&p[action_table +
+ (12 * i) + 8]);
+ }
+ }
+
+ if (!action_found) {
+ status = -EINVAL;
+ done = 1;
+ }
+ }
+
+ if (status == 0) {
+ int first_time = 1;
+ i = current_proc;
+ while ((i != 0) || first_time) {
+ first_time = 0;
+ /* check procedure attribute byte */
+ proc_attributes[i] =
+ (p[proc_table +
+ (13 * i) + 8] &
+ 0x03);
+
+ /*
+ * BIT0 - OPTIONAL
+ * BIT1 - RECOMMENDED
+ * BIT6 - FORCED OFF
+ * BIT7 - FORCED ON
+ */
+
+ i = get_unaligned_be32(&p[proc_table +
+ (13 * i) + 4]);
+ }
+
+ /*
+ * Set current_proc to the first procedure
+ * to be executed
+ */
+ i = current_proc;
+ while ((i != 0) &&
+ ((proc_attributes[i] == 1) ||
+ ((proc_attributes[i] & 0xc0) == 0x40))) {
+ i = get_unaligned_be32(&p[proc_table +
+ (13 * i) + 4]);
+ }
+
+ if ((i != 0) || ((i == 0) && (current_proc == 0) &&
+ ((proc_attributes[0] != 1) &&
+ ((proc_attributes[0] & 0xc0) != 0x40)))) {
+ current_proc = i;
+ pc = code_sect +
+ get_unaligned_be32(&p[proc_table +
+ (13 * i) + 9]);
+ if ((pc < code_sect) || (pc >= debug_sect))
+ status = -ERANGE;
+ } else
+ /* there are no procedures to execute! */
+ done = 1;
+
+ }
+ }
+
+ msg_buff[0] = '\0';
+
+ while (!done) {
+ opcode = (p[pc] & 0xff);
+ opcode_address = pc;
+ ++pc;
+
+ if (debug > 1)
+			printk(KERN_DEBUG "opcode: %02x\n", opcode);
+
+ arg_count = (opcode >> 6) & 3;
+ for (i = 0; i < arg_count; ++i) {
+ args[i] = get_unaligned_be32(&p[pc]);
+ pc += 4;
+ }
+
+ switch (opcode) {
+ case OP_NOP:
+ break;
+ case OP_DUP:
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ stack[stack_ptr] = stack[stack_ptr - 1];
+ ++stack_ptr;
+ }
+ break;
+ case OP_SWP:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ long_tmp = stack[stack_ptr - 2];
+ stack[stack_ptr - 2] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+ break;
+ case OP_ADD:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] += stack[stack_ptr];
+ }
+ break;
+ case OP_SUB:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] -= stack[stack_ptr];
+ }
+ break;
+ case OP_MULT:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] *= stack[stack_ptr];
+ }
+ break;
+ case OP_DIV:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] /= stack[stack_ptr];
+ }
+ break;
+ case OP_MOD:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] %= stack[stack_ptr];
+ }
+ break;
+ case OP_SHL:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] <<= stack[stack_ptr];
+ }
+ break;
+ case OP_SHR:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] >>= stack[stack_ptr];
+ }
+ break;
+ case OP_NOT:
+ if (altera_check_stack(stack_ptr, 1, &status))
+ stack[stack_ptr - 1] ^= (-1L);
+
+ break;
+ case OP_AND:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] &= stack[stack_ptr];
+ }
+ break;
+ case OP_OR:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] |= stack[stack_ptr];
+ }
+ break;
+ case OP_XOR:
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ --stack_ptr;
+ stack[stack_ptr - 1] ^= stack[stack_ptr];
+ }
+ break;
+ case OP_INV:
+ if (!altera_check_stack(stack_ptr, 1, &status))
+ break;
+ stack[stack_ptr - 1] = stack[stack_ptr - 1] ? 0L : 1L;
+ break;
+ case OP_GT:
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ --stack_ptr;
+ stack[stack_ptr - 1] =
+ (stack[stack_ptr - 1] > stack[stack_ptr]) ?
+ 1L : 0L;
+
+ break;
+ case OP_LT:
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ --stack_ptr;
+ stack[stack_ptr - 1] =
+ (stack[stack_ptr - 1] < stack[stack_ptr]) ?
+ 1L : 0L;
+
+ break;
+ case OP_RET:
+ if ((version > 0) && (stack_ptr == 0)) {
+ /*
+ * We completed one of the main procedures
+ * of an ACTION.
+ * Find the next procedure
+ * to be executed and jump to it.
+ * If there are no more procedures, then EXIT.
+ */
+ i = get_unaligned_be32(&p[proc_table +
+ (13 * current_proc) + 4]);
+ while ((i != 0) &&
+ ((proc_attributes[i] == 1) ||
+ ((proc_attributes[i] & 0xc0) == 0x40)))
+ i = get_unaligned_be32(&p[proc_table +
+ (13 * i) + 4]);
+
+ if (i == 0) {
+ /* no procedures to execute! */
+ done = 1;
+ *exit_code = 0; /* success */
+ } else {
+ current_proc = i;
+ pc = code_sect + get_unaligned_be32(
+ &p[proc_table +
+ (13 * i) + 9]);
+ if ((pc < code_sect) ||
+ (pc >= debug_sect))
+ status = -ERANGE;
+ }
+
+ } else
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ pc = stack[--stack_ptr] + code_sect;
+ if ((pc <= code_sect) ||
+ (pc >= debug_sect))
+ status = -ERANGE;
+
+ }
+
+ break;
+ case OP_CMPS:
+ /*
+ * Array short compare
+ * ...stack 0 is source 1 value
+ * ...stack 1 is source 2 value
+ * ...stack 2 is mask value
+ * ...stack 3 is count
+ */
+ if (altera_check_stack(stack_ptr, 4, &status)) {
+ s32 a = stack[--stack_ptr];
+ s32 b = stack[--stack_ptr];
+ long_tmp = stack[--stack_ptr];
+ count = stack[stack_ptr - 1];
+
+ if ((count < 1) || (count > 32))
+ status = -ERANGE;
+ else {
+					/* mask of the low "count" bits */
+					long_tmp &=
+						(0xffffffffu >> (32 - count));
+
+ stack[stack_ptr - 1] =
+ ((a & long_tmp) == (b & long_tmp))
+ ? 1L : 0L;
+ }
+ }
+ break;
+ case OP_PINT:
+ /*
+ * PRINT add integer
+ * ...stack 0 is integer value
+ */
+ if (!altera_check_stack(stack_ptr, 1, &status))
+ break;
+ sprintf(&msg_buff[strlen(msg_buff)],
+ "%ld", stack[--stack_ptr]);
+ break;
+ case OP_PRNT:
+ /* PRINT finish */
+ if (debug)
+				printk(KERN_DEBUG "%s\n", msg_buff);
+
+ msg_buff[0] = '\0';
+ break;
+ case OP_DSS:
+ /*
+ * DRSCAN short
+ * ...stack 0 is scan data
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ long_tmp = stack[--stack_ptr];
+ count = stack[--stack_ptr];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_drscan(astate, count, charbuf, 0);
+ break;
+ case OP_DSSC:
+ /*
+ * DRSCAN short with capture
+ * ...stack 0 is scan data
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ long_tmp = stack[--stack_ptr];
+ count = stack[stack_ptr - 1];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_swap_dr(astate, count, charbuf,
+ 0, charbuf, 0);
+ stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]);
+ break;
+ case OP_ISS:
+ /*
+ * IRSCAN short
+ * ...stack 0 is scan data
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ long_tmp = stack[--stack_ptr];
+ count = stack[--stack_ptr];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_irscan(astate, count, charbuf, 0);
+ break;
+ case OP_ISSC:
+ /*
+ * IRSCAN short with capture
+ * ...stack 0 is scan data
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ long_tmp = stack[--stack_ptr];
+ count = stack[stack_ptr - 1];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_swap_ir(astate, count, charbuf,
+ 0, charbuf, 0);
+ stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]);
+ break;
+ case OP_DPR:
+ if (!altera_check_stack(stack_ptr, 1, &status))
+ break;
+ count = stack[--stack_ptr];
+ status = altera_set_dr_pre(&astate->js, count, 0, NULL);
+ break;
+ case OP_DPRL:
+ /*
+ * DRPRE with literal data
+ * ...stack 0 is count
+ * ...stack 1 is literal data
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ count = stack[--stack_ptr];
+ long_tmp = stack[--stack_ptr];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_set_dr_pre(&astate->js, count, 0,
+ charbuf);
+ break;
+ case OP_DPO:
+ /*
+ * DRPOST
+ * ...stack 0 is count
+ */
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ count = stack[--stack_ptr];
+ status = altera_set_dr_post(&astate->js, count,
+ 0, NULL);
+ }
+ break;
+ case OP_DPOL:
+ /*
+ * DRPOST with literal data
+ * ...stack 0 is count
+ * ...stack 1 is literal data
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ count = stack[--stack_ptr];
+ long_tmp = stack[--stack_ptr];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_set_dr_post(&astate->js, count, 0,
+ charbuf);
+ break;
+ case OP_IPR:
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ count = stack[--stack_ptr];
+ status = altera_set_ir_pre(&astate->js, count,
+ 0, NULL);
+ }
+ break;
+ case OP_IPRL:
+ /*
+ * IRPRE with literal data
+ * ...stack 0 is count
+ * ...stack 1 is literal data
+ */
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ count = stack[--stack_ptr];
+ long_tmp = stack[--stack_ptr];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_set_ir_pre(&astate->js, count,
+ 0, charbuf);
+ }
+ break;
+ case OP_IPO:
+ /*
+ * IRPOST
+ * ...stack 0 is count
+ */
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ count = stack[--stack_ptr];
+ status = altera_set_ir_post(&astate->js, count,
+ 0, NULL);
+ }
+ break;
+ case OP_IPOL:
+ /*
+ * IRPOST with literal data
+ * ...stack 0 is count
+ * ...stack 1 is literal data
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ count = stack[--stack_ptr];
+ long_tmp = stack[--stack_ptr];
+ put_unaligned_le32(long_tmp, &charbuf[0]);
+ status = altera_set_ir_post(&astate->js, count, 0,
+ charbuf);
+ break;
+ case OP_PCHR:
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ u8 ch;
+ count = strlen(msg_buff);
+ ch = (char) stack[--stack_ptr];
+ if ((ch < 1) || (ch > 127)) {
+ /*
+ * character code out of range
+ * instead of flagging an error,
+ * force the value to 127
+ */
+ ch = 127;
+ }
+ msg_buff[count] = ch;
+ msg_buff[count + 1] = '\0';
+ }
+ break;
+ case OP_EXIT:
+ if (altera_check_stack(stack_ptr, 1, &status))
+ *exit_code = stack[--stack_ptr];
+
+ done = 1;
+ break;
+ case OP_EQU:
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ --stack_ptr;
+ stack[stack_ptr - 1] =
+ (stack[stack_ptr - 1] == stack[stack_ptr]) ?
+ 1L : 0L;
+ break;
+ case OP_POPT:
+ if (altera_check_stack(stack_ptr, 1, &status))
+ --stack_ptr;
+
+ break;
+ case OP_ABS:
+ if (!altera_check_stack(stack_ptr, 1, &status))
+ break;
+ if (stack[stack_ptr - 1] < 0)
+ stack[stack_ptr - 1] = 0 - stack[stack_ptr - 1];
+
+ break;
+ case OP_BCH0:
+ /*
+ * Batch operation 0
+ * SWP
+ * SWPN 7
+ * SWP
+ * SWPN 6
+ * DUPN 8
+ * SWPN 2
+ * SWP
+ * DUPN 6
+ * DUPN 6
+ */
+
+ /* SWP */
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ long_tmp = stack[stack_ptr - 2];
+ stack[stack_ptr - 2] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+
+ /* SWPN 7 */
+ index = 7 + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ long_tmp = stack[stack_ptr - index];
+ stack[stack_ptr - index] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+
+ /* SWP */
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ long_tmp = stack[stack_ptr - 2];
+ stack[stack_ptr - 2] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+
+ /* SWPN 6 */
+ index = 6 + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ long_tmp = stack[stack_ptr - index];
+ stack[stack_ptr - index] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+
+ /* DUPN 8 */
+ index = 8 + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ stack[stack_ptr] = stack[stack_ptr - index];
+ ++stack_ptr;
+ }
+
+ /* SWPN 2 */
+ index = 2 + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ long_tmp = stack[stack_ptr - index];
+ stack[stack_ptr - index] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+
+ /* SWP */
+ if (altera_check_stack(stack_ptr, 2, &status)) {
+ long_tmp = stack[stack_ptr - 2];
+ stack[stack_ptr - 2] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+
+ /* DUPN 6 */
+ index = 6 + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ stack[stack_ptr] = stack[stack_ptr - index];
+ ++stack_ptr;
+ }
+
+ /* DUPN 6 */
+ index = 6 + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ stack[stack_ptr] = stack[stack_ptr - index];
+ ++stack_ptr;
+ }
+ break;
+ case OP_PSH0:
+ stack[stack_ptr++] = 0;
+ break;
+ case OP_PSHL:
+ stack[stack_ptr++] = (s32) args[0];
+ break;
+ case OP_PSHV:
+ stack[stack_ptr++] = vars[args[0]];
+ break;
+ case OP_JMP:
+ pc = args[0] + code_sect;
+ if ((pc < code_sect) || (pc >= debug_sect))
+ status = -ERANGE;
+ break;
+ case OP_CALL:
+ stack[stack_ptr++] = pc;
+ pc = args[0] + code_sect;
+ if ((pc < code_sect) || (pc >= debug_sect))
+ status = -ERANGE;
+ break;
+ case OP_NEXT:
+ /*
+ * Process FOR / NEXT loop
+ * ...argument 0 is variable ID
+ * ...stack 0 is step value
+ * ...stack 1 is end value
+ * ...stack 2 is top address
+ */
+ if (altera_check_stack(stack_ptr, 3, &status)) {
+ s32 step = stack[stack_ptr - 1];
+ s32 end = stack[stack_ptr - 2];
+ s32 top = stack[stack_ptr - 3];
+ s32 iterator = vars[args[0]];
+ int break_out = 0;
+
+ if (step < 0) {
+ if (iterator <= end)
+ break_out = 1;
+ } else if (iterator >= end)
+ break_out = 1;
+
+ if (break_out) {
+ stack_ptr -= 3;
+ } else {
+ vars[args[0]] = iterator + step;
+ pc = top + code_sect;
+ if ((pc < code_sect) ||
+ (pc >= debug_sect))
+ status = -ERANGE;
+ }
+ }
+ break;
+ case OP_PSTR:
+ /*
+ * PRINT add string
+ * ...argument 0 is string ID
+ */
+ count = strlen(msg_buff);
+ strlcpy(&msg_buff[count],
+ &p[str_table + args[0]],
+ ALTERA_MESSAGE_LENGTH - count);
+ break;
+ case OP_SINT:
+ /*
+ * STATE intermediate state
+ * ...argument 0 is state code
+ */
+ status = altera_goto_jstate(astate, args[0]);
+ break;
+ case OP_ST:
+ /*
+ * STATE final state
+ * ...argument 0 is state code
+ */
+ status = altera_goto_jstate(astate, args[0]);
+ break;
+ case OP_ISTP:
+ /*
+ * IRSTOP state
+ * ...argument 0 is state code
+ */
+ status = altera_set_irstop(&astate->js, args[0]);
+ break;
+ case OP_DSTP:
+ /*
+ * DRSTOP state
+ * ...argument 0 is state code
+ */
+ status = altera_set_drstop(&astate->js, args[0]);
+ break;
+
+ case OP_SWPN:
+ /*
+ * Exchange top with Nth stack value
+ * ...argument 0 is 0-based stack entry
+ * to swap with top element
+ */
+ index = (args[0]) + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ long_tmp = stack[stack_ptr - index];
+ stack[stack_ptr - index] = stack[stack_ptr - 1];
+ stack[stack_ptr - 1] = long_tmp;
+ }
+ break;
+ case OP_DUPN:
+ /*
+ * Duplicate Nth stack value
+ * ...argument 0 is 0-based stack entry to duplicate
+ */
+ index = (args[0]) + 1;
+ if (altera_check_stack(stack_ptr, index, &status)) {
+ stack[stack_ptr] = stack[stack_ptr - index];
+ ++stack_ptr;
+ }
+ break;
+ case OP_POPV:
+ /*
+ * Pop stack into scalar variable
+ * ...argument 0 is variable ID
+ * ...stack 0 is value
+ */
+ if (altera_check_stack(stack_ptr, 1, &status))
+ vars[args[0]] = stack[--stack_ptr];
+
+ break;
+ case OP_POPE:
+ /*
+ * Pop stack into integer array element
+ * ...argument 0 is variable ID
+ * ...stack 0 is array index
+ * ...stack 1 is value
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ variable_id = args[0];
+
+ /*
+ * If variable is read-only,
+ * convert to writable array
+ */
+ if ((version > 0) &&
+ ((attrs[variable_id] & 0x9c) == 0x1c)) {
+ /* Allocate a writable buffer for this array */
+ count = var_size[variable_id];
+ long_tmp = vars[variable_id];
+				longptr_tmp = kcalloc(count, sizeof(long),
+								GFP_KERNEL);
+ vars[variable_id] = (long)longptr_tmp;
+
+ if (vars[variable_id] == 0) {
+ status = -ENOMEM;
+ break;
+ }
+
+ /* copy previous contents into buffer */
+ for (i = 0; i < count; ++i) {
+ longptr_tmp[i] =
+ get_unaligned_be32(&p[long_tmp]);
+ long_tmp += sizeof(long);
+ }
+
+ /*
+ * set bit 7 - buffer was
+ * dynamically allocated
+ */
+ attrs[variable_id] |= 0x80;
+
+ /* clear bit 2 - variable is writable */
+ attrs[variable_id] &= ~0x04;
+ attrs[variable_id] |= 0x01;
+
+ }
+
+ /* check that variable is a writable integer array */
+ if ((attrs[variable_id] & 0x1c) != 0x18)
+ status = -ERANGE;
+ else {
+ longptr_tmp = (long *)vars[variable_id];
+
+ /* pop the array index */
+ index = stack[--stack_ptr];
+
+ /* pop the value and store it into the array */
+ longptr_tmp[index] = stack[--stack_ptr];
+ }
+
+ break;
+ case OP_POPA:
+ /*
+ * Pop stack into Boolean array
+ * ...argument 0 is variable ID
+ * ...stack 0 is count
+ * ...stack 1 is array index
+ * ...stack 2 is value
+ */
+ if (!altera_check_stack(stack_ptr, 3, &status))
+ break;
+ variable_id = args[0];
+
+ /*
+ * If variable is read-only,
+ * convert to writable array
+ */
+ if ((version > 0) &&
+ ((attrs[variable_id] & 0x9c) == 0x0c)) {
+ /* Allocate a writable buffer for this array */
+ long_tmp =
+ (var_size[variable_id] + 7L) >> 3L;
+ charptr_tmp2 = (u8 *)vars[variable_id];
+ charptr_tmp =
+ kzalloc(long_tmp, GFP_KERNEL);
+ vars[variable_id] = (long)charptr_tmp;
+
+ if (vars[variable_id] == 0) {
+ status = -ENOMEM;
+ break;
+ }
+
+ /* zero the buffer */
+ for (long_idx = 0L;
+ long_idx < long_tmp;
+ ++long_idx) {
+ charptr_tmp[long_idx] = 0;
+ }
+
+ /* copy previous contents into buffer */
+ for (long_idx = 0L;
+ long_idx < var_size[variable_id];
+ ++long_idx) {
+ long_idx2 = long_idx;
+
+ if (charptr_tmp2[long_idx2 >> 3] &
+ (1 << (long_idx2 & 7))) {
+ charptr_tmp[long_idx >> 3] |=
+ (1 << (long_idx & 7));
+ }
+ }
+
+ /*
+ * set bit 7 - buffer was
+ * dynamically allocated
+ */
+ attrs[variable_id] |= 0x80;
+
+ /* clear bit 2 - variable is writable */
+ attrs[variable_id] &= ~0x04;
+ attrs[variable_id] |= 0x01;
+
+ }
+
+ /*
+ * check that variable is
+ * a writable Boolean array
+ */
+ if ((attrs[variable_id] & 0x1c) != 0x08) {
+ status = -ERANGE;
+ break;
+ }
+
+ charptr_tmp = (u8 *)vars[variable_id];
+
+ /* pop the count (number of bits to copy) */
+ long_count = stack[--stack_ptr];
+
+ /* pop the array index */
+ long_idx = stack[--stack_ptr];
+
+ reverse = 0;
+
+ if (version > 0) {
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ */
+
+ if (long_idx > long_count) {
+ reverse = 1;
+ long_tmp = long_count;
+ long_count = 1 + long_idx -
+ long_count;
+ long_idx = long_tmp;
+
+ /* reverse POPA is not supported */
+ status = -ERANGE;
+ break;
+ } else
+ long_count = 1 + long_count -
+ long_idx;
+
+ }
+
+ /* pop the data */
+ long_tmp = stack[--stack_ptr];
+
+ if (long_count < 1) {
+ status = -ERANGE;
+ break;
+ }
+
+ for (i = 0; i < long_count; ++i) {
+ if (long_tmp & (1L << (s32) i))
+ charptr_tmp[long_idx >> 3L] |=
+ (1L << (long_idx & 7L));
+ else
+ charptr_tmp[long_idx >> 3L] &=
+ ~(1L << (long_idx & 7L));
+
+ ++long_idx;
+ }
+
+ break;
+ case OP_JMPZ:
+ /*
+ * Pop stack and branch if zero
+ * ...argument 0 is address
+ * ...stack 0 is condition value
+ */
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ if (stack[--stack_ptr] == 0) {
+ pc = args[0] + code_sect;
+ if ((pc < code_sect) ||
+ (pc >= debug_sect))
+ status = -ERANGE;
+ }
+ }
+ break;
+ case OP_DS:
+ case OP_IS:
+ /*
+ * DRSCAN
+ * IRSCAN
+ * ...argument 0 is scan data variable ID
+ * ...stack 0 is array index
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ long_idx = stack[--stack_ptr];
+ long_count = stack[--stack_ptr];
+ reverse = 0;
+ if (version > 0) {
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ * stack 2 = count
+ */
+ long_tmp = long_count;
+ long_count = stack[--stack_ptr];
+
+ if (long_idx > long_tmp) {
+ reverse = 1;
+ long_idx = long_tmp;
+ }
+ }
+
+ charptr_tmp = (u8 *)vars[args[0]];
+
+ if (reverse) {
+ /*
+ * allocate a buffer
+ * and reverse the data order
+ */
+ charptr_tmp2 = charptr_tmp;
+ charptr_tmp = kzalloc((long_count >> 3) + 1,
+ GFP_KERNEL);
+ if (charptr_tmp == NULL) {
+ status = -ENOMEM;
+ break;
+ }
+
+ long_tmp = long_idx + long_count - 1;
+ long_idx2 = 0;
+ while (long_idx2 < long_count) {
+ if (charptr_tmp2[long_tmp >> 3] &
+ (1 << (long_tmp & 7)))
+ charptr_tmp[long_idx2 >> 3] |=
+ (1 << (long_idx2 & 7));
+ else
+ charptr_tmp[long_idx2 >> 3] &=
+ ~(1 << (long_idx2 & 7));
+
+ --long_tmp;
+ ++long_idx2;
+ }
+ }
+
+			if (opcode == OP_DS)
+ status = altera_drscan(astate, long_count,
+ charptr_tmp, long_idx);
+ else /* IS */
+ status = altera_irscan(astate, long_count,
+ charptr_tmp, long_idx);
+
+ if (reverse)
+ kfree(charptr_tmp);
+
+ break;
+ case OP_DPRA:
+ /*
+ * DRPRE with array data
+ * ...argument 0 is variable ID
+ * ...stack 0 is array index
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ index = stack[--stack_ptr];
+ count = stack[--stack_ptr];
+
+ if (version > 0)
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ */
+ count = 1 + count - index;
+
+ charptr_tmp = (u8 *)vars[args[0]];
+ status = altera_set_dr_pre(&astate->js, count, index,
+ charptr_tmp);
+ break;
+ case OP_DPOA:
+ /*
+ * DRPOST with array data
+ * ...argument 0 is variable ID
+ * ...stack 0 is array index
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ index = stack[--stack_ptr];
+ count = stack[--stack_ptr];
+
+ if (version > 0)
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ */
+ count = 1 + count - index;
+
+ charptr_tmp = (u8 *)vars[args[0]];
+ status = altera_set_dr_post(&astate->js, count, index,
+ charptr_tmp);
+ break;
+ case OP_IPRA:
+ /*
+ * IRPRE with array data
+ * ...argument 0 is variable ID
+ * ...stack 0 is array index
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ index = stack[--stack_ptr];
+ count = stack[--stack_ptr];
+
+ if (version > 0)
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ */
+ count = 1 + count - index;
+
+ charptr_tmp = (u8 *)vars[args[0]];
+ status = altera_set_ir_pre(&astate->js, count, index,
+ charptr_tmp);
+
+ break;
+ case OP_IPOA:
+ /*
+ * IRPOST with array data
+ * ...argument 0 is variable ID
+ * ...stack 0 is array index
+ * ...stack 1 is count
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ index = stack[--stack_ptr];
+ count = stack[--stack_ptr];
+
+ if (version > 0)
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ */
+ count = 1 + count - index;
+
+ charptr_tmp = (u8 *)vars[args[0]];
+ status = altera_set_ir_post(&astate->js, count, index,
+ charptr_tmp);
+
+ break;
+ case OP_EXPT:
+ /*
+ * EXPORT
+ * ...argument 0 is string ID
+ * ...stack 0 is integer expression
+ */
+ if (altera_check_stack(stack_ptr, 1, &status)) {
+ name = &p[str_table + args[0]];
+ long_tmp = stack[--stack_ptr];
+ altera_export_int(name, long_tmp);
+ }
+ break;
+ case OP_PSHE:
+ /*
+ * Push integer array element
+ * ...argument 0 is variable ID
+ * ...stack 0 is array index
+ */
+ if (!altera_check_stack(stack_ptr, 1, &status))
+ break;
+ variable_id = args[0];
+ index = stack[stack_ptr - 1];
+
+ /* check variable type */
+ if ((attrs[variable_id] & 0x1f) == 0x19) {
+ /* writable integer array */
+ longptr_tmp = (long *)vars[variable_id];
+ stack[stack_ptr - 1] = longptr_tmp[index];
+ } else if ((attrs[variable_id] & 0x1f) == 0x1c) {
+ /* read-only integer array */
+ long_tmp = vars[variable_id] +
+ (index * sizeof(long));
+ stack[stack_ptr - 1] =
+ get_unaligned_be32(&p[long_tmp]);
+ } else
+ status = -ERANGE;
+
+ break;
+ case OP_PSHA:
+ /*
+ * Push Boolean array
+ * ...argument 0 is variable ID
+ * ...stack 0 is count
+ * ...stack 1 is array index
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ variable_id = args[0];
+
+ /* check that variable is a Boolean array */
+ if ((attrs[variable_id] & 0x18) != 0x08) {
+ status = -ERANGE;
+ break;
+ }
+
+ charptr_tmp = (u8 *)vars[variable_id];
+
+ /* pop the count (number of bits to copy) */
+ count = stack[--stack_ptr];
+
+ /* pop the array index */
+ index = stack[stack_ptr - 1];
+
+ if (version > 0)
+ /*
+ * stack 0 = array right index
+ * stack 1 = array left index
+ */
+ count = 1 + count - index;
+
+ if ((count < 1) || (count > 32)) {
+ status = -ERANGE;
+ break;
+ }
+
+ long_tmp = 0L;
+
+ for (i = 0; i < count; ++i)
+ if (charptr_tmp[(i + index) >> 3] &
+ (1 << ((i + index) & 7)))
+ long_tmp |= (1L << i);
+
+ stack[stack_ptr - 1] = long_tmp;
+
+ break;
+ case OP_DYNA:
+ /*
+ * Dynamically change size of array
+ * ...argument 0 is variable ID
+ * ...stack 0 is new size
+ */
+ if (!altera_check_stack(stack_ptr, 1, &status))
+ break;
+ variable_id = args[0];
+ long_tmp = stack[--stack_ptr];
+
+ if (long_tmp > var_size[variable_id]) {
+ var_size[variable_id] = long_tmp;
+
+ if (attrs[variable_id] & 0x10)
+ /* allocate integer array */
+ long_tmp *= sizeof(long);
+ else
+ /* allocate Boolean array */
+ long_tmp = (long_tmp + 7) >> 3;
+
+ /*
+ * If the buffer was previously allocated,
+ * free it
+ */
+ if (attrs[variable_id] & 0x80) {
+ kfree((void *)vars[variable_id]);
+ vars[variable_id] = 0;
+ }
+
+ /*
+ * Allocate a new buffer
+ * of the requested size
+ */
+ vars[variable_id] = (long)
+ kzalloc(long_tmp, GFP_KERNEL);
+
+ if (vars[variable_id] == 0) {
+ status = -ENOMEM;
+ break;
+ }
+
+ /*
+ * Set the attribute bit to indicate that
+ * this buffer was dynamically allocated and
+ * should be freed later
+ */
+ attrs[variable_id] |= 0x80;
+
+ /* zero out memory */
+ count = ((var_size[variable_id] + 7L) /
+ 8L);
+ charptr_tmp = (u8 *)(vars[variable_id]);
+ for (index = 0; index < count; ++index)
+ charptr_tmp[index] = 0;
+
+ }
+
+ break;
+ case OP_EXPV:
+ /*
+ * Export Boolean array
+ * ...argument 0 is string ID
+ * ...stack 0 is variable ID
+ * ...stack 1 is array right index
+ * ...stack 2 is array left index
+ */
+ if (!altera_check_stack(stack_ptr, 3, &status))
+ break;
+ if (version == 0) {
+ /* EXPV is not supported in JBC 1.0 */
+ bad_opcode = 1;
+ break;
+ }
+ name = &p[str_table + args[0]];
+ variable_id = stack[--stack_ptr];
+ long_idx = stack[--stack_ptr];/* right indx */
+ long_idx2 = stack[--stack_ptr];/* left indx */
+
+ if (long_idx > long_idx2) {
+ /* reverse indices not supported */
+ status = -ERANGE;
+ break;
+ }
+
+ long_count = 1 + long_idx2 - long_idx;
+
+ charptr_tmp = (u8 *)vars[variable_id];
+ charptr_tmp2 = NULL;
+
+ if ((long_idx & 7L) != 0) {
+ s32 k = long_idx;
+ charptr_tmp2 =
+ kzalloc(((long_count + 7L) / 8L),
+ GFP_KERNEL);
+ if (charptr_tmp2 == NULL) {
+ status = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < long_count; ++i) {
+ if (charptr_tmp[k >> 3] &
+ (1 << (k & 7)))
+ charptr_tmp2[i >> 3] |=
+ (1 << (i & 7));
+ else
+ charptr_tmp2[i >> 3] &=
+ ~(1 << (i & 7));
+
+ ++k;
+ }
+ charptr_tmp = charptr_tmp2;
+
+ } else if (long_idx != 0)
+ charptr_tmp = &charptr_tmp[long_idx >> 3];
+
+ altera_export_bool_array(name, charptr_tmp,
+ long_count);
+
+ /* free allocated buffer */
+ if ((long_idx & 7L) != 0)
+ kfree(charptr_tmp2);
+
+ break;
+ case OP_COPY: {
+ /*
+ * Array copy
+ * ...argument 0 is dest ID
+ * ...argument 1 is source ID
+ * ...stack 0 is count
+ * ...stack 1 is dest index
+ * ...stack 2 is source index
+ */
+ s32 copy_count;
+ s32 copy_index;
+ s32 copy_index2;
+ s32 destleft;
+ s32 src_count;
+ s32 dest_count;
+ int src_reverse = 0;
+ int dest_reverse = 0;
+
+ if (!altera_check_stack(stack_ptr, 3, &status))
+ break;
+
+ copy_count = stack[--stack_ptr];
+ copy_index = stack[--stack_ptr];
+ copy_index2 = stack[--stack_ptr];
+ reverse = 0;
+
+ if (version > 0) {
+ /*
+ * stack 0 = source right index
+ * stack 1 = source left index
+ * stack 2 = destination right index
+ * stack 3 = destination left index
+ */
+ destleft = stack[--stack_ptr];
+
+ if (copy_count > copy_index) {
+ src_reverse = 1;
+ reverse = 1;
+ src_count = 1 + copy_count - copy_index;
+ /* copy_index = source start index */
+ } else {
+ src_count = 1 + copy_index - copy_count;
+ /* source start index */
+ copy_index = copy_count;
+ }
+
+ if (copy_index2 > destleft) {
+ dest_reverse = 1;
+ reverse = !reverse;
+ dest_count = 1 + copy_index2 - destleft;
+ /* destination start index */
+ copy_index2 = destleft;
+ } else
+ dest_count = 1 + destleft - copy_index2;
+
+ copy_count = (src_count < dest_count) ?
+ src_count : dest_count;
+
+ if ((src_reverse || dest_reverse) &&
+ (src_count != dest_count))
+ /*
+ * If either the source or destination
+ * is reversed, we can't tolerate
+ * a length mismatch, because we
+ * "left justify" arrays when copying.
+ * This won't work correctly
+ * with reversed arrays.
+ */
+ status = -ERANGE;
+
+ }
+
+ count = copy_count;
+ index = copy_index;
+ index2 = copy_index2;
+
+ /*
+ * If destination is a read-only array,
+ * allocate a buffer and convert it to a writable array
+ */
+ variable_id = args[1];
+ if ((version > 0) &&
+ ((attrs[variable_id] & 0x9c) == 0x0c)) {
+ /* Allocate a writable buffer for this array */
+ long_tmp =
+ (var_size[variable_id] + 7L) >> 3L;
+ charptr_tmp2 = (u8 *)vars[variable_id];
+ charptr_tmp =
+ kzalloc(long_tmp, GFP_KERNEL);
+ vars[variable_id] = (long)charptr_tmp;
+
+ if (vars[variable_id] == 0) {
+ status = -ENOMEM;
+ break;
+ }
+
+ /* zero the buffer */
+ for (long_idx = 0L; long_idx < long_tmp;
+ ++long_idx)
+ charptr_tmp[long_idx] = 0;
+
+ /* copy previous contents into buffer */
+ for (long_idx = 0L;
+ long_idx < var_size[variable_id];
+ ++long_idx) {
+ long_idx2 = long_idx;
+
+ if (charptr_tmp2[long_idx2 >> 3] &
+ (1 << (long_idx2 & 7)))
+ charptr_tmp[long_idx >> 3] |=
+ (1 << (long_idx & 7));
+
+ }
+
+			/* set bit 7 - buffer was dynamically allocated */
+ attrs[variable_id] |= 0x80;
+
+ /* clear bit 2 - variable is writable */
+ attrs[variable_id] &= ~0x04;
+ attrs[variable_id] |= 0x01;
+ }
+
+ charptr_tmp = (u8 *)vars[args[1]];
+ charptr_tmp2 = (u8 *)vars[args[0]];
+
+ /* check if destination is a writable Boolean array */
+ if ((attrs[args[1]] & 0x1c) != 0x08) {
+ status = -ERANGE;
+ break;
+ }
+
+ if (count < 1) {
+ status = -ERANGE;
+ break;
+ }
+
+ if (reverse)
+ index2 += (count - 1);
+
+ for (i = 0; i < count; ++i) {
+ if (charptr_tmp2[index >> 3] &
+ (1 << (index & 7)))
+ charptr_tmp[index2 >> 3] |=
+ (1 << (index2 & 7));
+ else
+ charptr_tmp[index2 >> 3] &=
+ ~(1 << (index2 & 7));
+
+ ++index;
+ if (reverse)
+ --index2;
+ else
+ ++index2;
+ }
+
+ break;
+ }
+ case OP_DSC:
+ case OP_ISC: {
+ /*
+ * DRSCAN with capture
+ * IRSCAN with capture
+ * ...argument 0 is scan data variable ID
+ * ...argument 1 is capture variable ID
+ * ...stack 0 is capture index
+ * ...stack 1 is scan data index
+ * ...stack 2 is count
+ */
+ s32 scan_right, scan_left;
+ s32 capture_count = 0;
+ s32 scan_count = 0;
+ s32 capture_index;
+ s32 scan_index;
+
+ if (!altera_check_stack(stack_ptr, 3, &status))
+ break;
+
+ capture_index = stack[--stack_ptr];
+ scan_index = stack[--stack_ptr];
+
+ if (version > 0) {
+ /*
+ * stack 0 = capture right index
+ * stack 1 = capture left index
+ * stack 2 = scan right index
+ * stack 3 = scan left index
+ * stack 4 = count
+ */
+ scan_right = stack[--stack_ptr];
+ scan_left = stack[--stack_ptr];
+ capture_count = 1 + scan_index - capture_index;
+ scan_count = 1 + scan_left - scan_right;
+ scan_index = scan_right;
+ }
+
+ long_count = stack[--stack_ptr];
+ /*
+ * If capture array is read-only, allocate a buffer
+ * and convert it to a writable array
+ */
+ variable_id = args[1];
+ if ((version > 0) &&
+ ((attrs[variable_id] & 0x9c) == 0x0c)) {
+ /* Allocate a writable buffer for this array */
+ long_tmp =
+ (var_size[variable_id] + 7L) >> 3L;
+ charptr_tmp2 = (u8 *)vars[variable_id];
+ charptr_tmp =
+ kzalloc(long_tmp, GFP_KERNEL);
+ vars[variable_id] = (long)charptr_tmp;
+
+ if (vars[variable_id] == 0) {
+ status = -ENOMEM;
+ break;
+ }
+
+ /* zero the buffer */
+ for (long_idx = 0L; long_idx < long_tmp;
+ ++long_idx)
+ charptr_tmp[long_idx] = 0;
+
+ /* copy previous contents into buffer */
+ for (long_idx = 0L;
+ long_idx < var_size[variable_id];
+ ++long_idx) {
+ long_idx2 = long_idx;
+
+ if (charptr_tmp2[long_idx2 >> 3] &
+ (1 << (long_idx2 & 7)))
+ charptr_tmp[long_idx >> 3] |=
+ (1 << (long_idx & 7));
+
+ }
+
+ /*
+ * set bit 7 - buffer was
+ * dynamically allocated
+ */
+ attrs[variable_id] |= 0x80;
+
+ /* clear bit 2 - variable is writable */
+ attrs[variable_id] &= ~0x04;
+ attrs[variable_id] |= 0x01;
+
+ }
+
+ charptr_tmp = (u8 *)vars[args[0]];
+ charptr_tmp2 = (u8 *)vars[args[1]];
+
+ if ((version > 0) &&
+ ((long_count > capture_count) ||
+ (long_count > scan_count))) {
+ status = -ERANGE;
+ break;
+ }
+
+ /*
+ * check that capture array
+ * is a writable Boolean array
+ */
+ if ((attrs[args[1]] & 0x1c) != 0x08) {
+ status = -ERANGE;
+ break;
+ }
+
+ if (status == 0) {
+				if (opcode == OP_DSC)
+ status = altera_swap_dr(astate,
+ long_count,
+ charptr_tmp,
+ scan_index,
+ charptr_tmp2,
+ capture_index);
+ else /* ISC */
+ status = altera_swap_ir(astate,
+ long_count,
+ charptr_tmp,
+ scan_index,
+ charptr_tmp2,
+ capture_index);
+
+ }
+
+ break;
+ }
+ case OP_WAIT:
+ /*
+ * WAIT
+ * ...argument 0 is wait state
+ * ...argument 1 is end state
+ * ...stack 0 is cycles
+ * ...stack 1 is microseconds
+ */
+ if (!altera_check_stack(stack_ptr, 2, &status))
+ break;
+ long_tmp = stack[--stack_ptr];
+
+ if (long_tmp != 0L)
+ status = altera_wait_cycles(astate, long_tmp,
+ args[0]);
+
+ long_tmp = stack[--stack_ptr];
+
+ if ((status == 0) && (long_tmp != 0L))
+ status = altera_wait_msecs(astate,
+ long_tmp,
+ args[0]);
+
+ if ((status == 0) && (args[1] != args[0]))
+ status = altera_goto_jstate(astate,
+ args[1]);
+
+ if (version > 0) {
+ --stack_ptr; /* throw away MAX cycles */
+ --stack_ptr; /* throw away MAX microseconds */
+ }
+ break;
+ case OP_CMPA: {
+ /*
+ * Array compare
+ * ...argument 0 is source 1 ID
+ * ...argument 1 is source 2 ID
+ * ...argument 2 is mask ID
+ * ...stack 0 is source 1 index
+ * ...stack 1 is source 2 index
+ * ...stack 2 is mask index
+ * ...stack 3 is count
+ */
+ s32 a, b;
+ u8 *source1 = (u8 *)vars[args[0]];
+ u8 *source2 = (u8 *)vars[args[1]];
+ u8 *mask = (u8 *)vars[args[2]];
+ u32 index1;
+ u32 index2;
+ u32 mask_index;
+
+ if (!altera_check_stack(stack_ptr, 4, &status))
+ break;
+
+ index1 = stack[--stack_ptr];
+ index2 = stack[--stack_ptr];
+ mask_index = stack[--stack_ptr];
+ long_count = stack[--stack_ptr];
+
+ if (version > 0) {
+ /*
+ * stack 0 = source 1 right index
+ * stack 1 = source 1 left index
+ * stack 2 = source 2 right index
+ * stack 3 = source 2 left index
+ * stack 4 = mask right index
+ * stack 5 = mask left index
+ */
+ s32 mask_right = stack[--stack_ptr];
+ s32 mask_left = stack[--stack_ptr];
+ /* source 1 count */
+ a = 1 + index2 - index1;
+ /* source 2 count */
+ b = 1 + long_count - mask_index;
+ a = (a < b) ? a : b;
+ /* mask count */
+ b = 1 + mask_left - mask_right;
+ a = (a < b) ? a : b;
+ /* source 2 start index */
+ index2 = mask_index;
+ /* mask start index */
+ mask_index = mask_right;
+ long_count = a;
+ }
+
+ long_tmp = 1L;
+
+ if (long_count < 1)
+ status = -ERANGE;
+ else {
+ count = long_count;
+
+ for (i = 0; i < count; ++i) {
+ if (mask[mask_index >> 3] &
+ (1 << (mask_index & 7))) {
+ a = source1[index1 >> 3] &
+ (1 << (index1 & 7))
+ ? 1 : 0;
+ b = source2[index2 >> 3] &
+ (1 << (index2 & 7))
+ ? 1 : 0;
+
+ if (a != b) /* failure */
+ long_tmp = 0L;
+ }
+ ++index1;
+ ++index2;
+ ++mask_index;
+ }
+ }
+
+ stack[stack_ptr++] = long_tmp;
+
+ break;
+ }
+ default:
+ /* Unrecognized opcode -- ERROR! */
+ bad_opcode = 1;
+ break;
+ }
+
+ if (bad_opcode)
+ status = -ENOSYS;
+
+ if ((stack_ptr < 0) || (stack_ptr >= ALTERA_STACK_SIZE))
+ status = -EOVERFLOW;
+
+ if (status != 0) {
+ done = 1;
+ *error_address = (s32)(opcode_address - code_sect);
+ }
+ }
+
+ altera_free_buffers(astate);
+
+ /* Free all dynamically allocated arrays */
+ if ((attrs != NULL) && (vars != NULL))
+ for (i = 0; i < sym_count; ++i)
+ if (attrs[i] & 0x80)
+ kfree((void *)vars[i]);
+
+ kfree(vars);
+ kfree(var_size);
+ kfree(attrs);
+ kfree(proc_attributes);
+
+ return status;
+}
+
+static int altera_get_note(u8 *p, s32 program_size,
+ s32 *offset, char *key, char *value, int length)
+/*
+ * Gets key and value of NOTE fields in the JBC file.
+ * Can be called in two modes: if offset pointer is NULL,
+ * then the function searches for note fields which match
+ * the key string provided. If offset is not NULL, then
+ * the function finds the next note field of any key,
+ * starting at the offset specified by the offset pointer.
+ * Returns 0 for success, else appropriate error code
+ */
+{
+ int status = -ENODATA;
+ u32 note_strings = 0L;
+ u32 note_table = 0L;
+ u32 note_count = 0L;
+ u32 first_word = 0L;
+ int version = 0;
+ int delta = 0;
+ char *key_ptr;
+ char *value_ptr;
+ int i;
+
+ /* Read header information */
+ if (program_size > 52L) {
+ first_word = get_unaligned_be32(&p[0]);
+ version = (first_word & 1L);
+ delta = version * 8;
+
+ note_strings = get_unaligned_be32(&p[8 + delta]);
+ note_table = get_unaligned_be32(&p[12 + delta]);
+ note_count = get_unaligned_be32(&p[44 + (2 * delta)]);
+ }
+
+ if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L))
+ return -EIO;
+
+ if (note_count <= 0L)
+ return status;
+
+ if (offset == NULL) {
+ /*
+ * We will search for the first note with a specific key,
+ * and return only the value
+ */
+ for (i = 0; (i < note_count) &&
+ (status != 0); ++i) {
+ key_ptr = &p[note_strings +
+ get_unaligned_be32(
+ &p[note_table + (8 * i)])];
+			if ((key != NULL) &&
+			    (strnicmp(key, key_ptr, strlen(key_ptr)) == 0)) {
+ status = 0;
+
+ value_ptr = &p[note_strings +
+ get_unaligned_be32(
+ &p[note_table + (8 * i) + 4])];
+
+ if (value != NULL)
+ strlcpy(value, value_ptr, length);
+
+ }
+ }
+ } else {
+ /*
+ * We will search for the next note, regardless of the key,
+ * and return both the value and the key
+ */
+
+ i = *offset;
+
+ if ((i >= 0) && (i < note_count)) {
+ status = 0;
+
+ if (key != NULL)
+ strlcpy(key, &p[note_strings +
+ get_unaligned_be32(
+ &p[note_table + (8 * i)])],
+ length);
+
+ if (value != NULL)
+ strlcpy(value, &p[note_strings +
+ get_unaligned_be32(
+ &p[note_table + (8 * i) + 4])],
+ length);
+
+ *offset = i + 1;
+ }
+ }
+
+ return status;
+}
+
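+/*
+ * CRC-16 over everything that precedes the CRC section, computed
+ * LSB-first with the reflected CCITT polynomial 0x8408, initial value
+ * 0xffff and a final bitwise inversion.
+ */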
+static int altera_check_crc(u8 *p, s32 program_size)
+{
+ int status = 0;
+ u16 local_expected = 0,
+ local_actual = 0,
+ shift_reg = 0xffff;
+ int bit, feedback;
+ u8 databyte;
+ u32 i;
+ u32 crc_section = 0L;
+ u32 first_word = 0L;
+ int version = 0;
+ int delta = 0;
+
+ if (program_size > 52L) {
+ first_word = get_unaligned_be32(&p[0]);
+ version = (first_word & 1L);
+ delta = version * 8;
+
+ crc_section = get_unaligned_be32(&p[32 + delta]);
+ }
+
+ if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L))
+ status = -EIO;
+
+ if (crc_section >= program_size)
+ status = -EIO;
+
+ if (status == 0) {
+ local_expected = (u16)get_unaligned_be16(&p[crc_section]);
+
+ for (i = 0; i < crc_section; ++i) {
+ databyte = p[i];
+ for (bit = 0; bit < 8; bit++) {
+ feedback = (databyte ^ shift_reg) & 0x01;
+ shift_reg >>= 1;
+ if (feedback)
+ shift_reg ^= 0x8408;
+
+ databyte >>= 1;
+ }
+ }
+
+ local_actual = (u16)~shift_reg;
+
+ if (local_expected != local_actual)
+ status = -EILSEQ;
+
+ }
+
+ if (debug || status) {
+ switch (status) {
+ case 0:
+ printk(KERN_INFO "%s: CRC matched: %04x\n", __func__,
+ local_actual);
+ break;
+ case -EILSEQ:
+ printk(KERN_ERR "%s: CRC mismatch: expected %04x, "
+ "actual %04x\n", __func__, local_expected,
+ local_actual);
+ break;
+ case -ENODATA:
+ printk(KERN_ERR "%s: expected CRC not found, "
+ "actual CRC = %04x\n", __func__,
+ local_actual);
+ break;
+ case -EIO:
+ printk(KERN_ERR "%s: error: format isn't "
+ "recognized.\n", __func__);
+ break;
+ default:
+ printk(KERN_ERR "%s: CRC function returned error "
+ "code %d\n", __func__, status);
+ break;
+ }
+ }
+
+ return status;
+}
+
+static int altera_get_file_info(u8 *p,
+ s32 program_size,
+ int *format_version,
+ int *action_count,
+ int *procedure_count)
+{
+ int status = -EIO;
+ u32 first_word = 0;
+ int version = 0;
+
+ if (program_size <= 52L)
+ return status;
+
+ first_word = get_unaligned_be32(&p[0]);
+
+ if ((first_word == 0x4A414D00L) || (first_word == 0x4A414D01L)) {
+ status = 0;
+
+ version = (first_word & 1L);
+ *format_version = version + 1;
+
+ if (version > 0) {
+ *action_count = get_unaligned_be32(&p[48]);
+ *procedure_count = get_unaligned_be32(&p[52]);
+ }
+ }
+
+ return status;
+}
+
+static int altera_get_act_info(u8 *p,
+ s32 program_size,
+ int index,
+ char **name,
+ char **description,
+ struct altera_procinfo **proc_list)
+{
+ int status = -EIO;
+ struct altera_procinfo *procptr = NULL;
+ struct altera_procinfo *tmpptr = NULL;
+ u32 first_word = 0L;
+ u32 action_table = 0L;
+ u32 proc_table = 0L;
+ u32 str_table = 0L;
+ u32 note_strings = 0L;
+ u32 action_count = 0L;
+ u32 proc_count = 0L;
+ u32 act_name_id = 0L;
+ u32 act_desc_id = 0L;
+ u32 act_proc_id = 0L;
+ u32 act_proc_name = 0L;
+ u8 act_proc_attribute = 0;
+
+ if (program_size <= 52L)
+ return status;
+ /* Read header information */
+ first_word = get_unaligned_be32(&p[0]);
+
+ if (first_word != 0x4A414D01L)
+ return status;
+
+ action_table = get_unaligned_be32(&p[4]);
+ proc_table = get_unaligned_be32(&p[8]);
+ str_table = get_unaligned_be32(&p[12]);
+ note_strings = get_unaligned_be32(&p[16]);
+ action_count = get_unaligned_be32(&p[48]);
+ proc_count = get_unaligned_be32(&p[52]);
+
+ if (index >= action_count)
+ return status;
+
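+	/* each action table entry is 12 bytes: name, description and procedure IDs */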
+ act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]);
+ act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]);
+ act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]);
+
+ *name = &p[str_table + act_name_id];
+
+ if (act_desc_id < (note_strings - str_table))
+ *description = &p[str_table + act_desc_id];
+
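+	/*
+	 * Walk the linked procedure list; each 13-byte procedure table
+	 * entry holds a name ID at +0, the next-procedure ID at +4 and
+	 * attribute bits at +8.
+	 */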
+ do {
+ act_proc_name = get_unaligned_be32(
+ &p[proc_table + (13 * act_proc_id)]);
+ act_proc_attribute =
+ (p[proc_table + (13 * act_proc_id) + 8] & 0x03);
+
+ procptr =
+ kzalloc(sizeof(struct altera_procinfo),
+ GFP_KERNEL);
+
+ if (procptr == NULL)
+ status = -ENOMEM;
+ else {
+ procptr->name = &p[str_table + act_proc_name];
+ procptr->attrs = act_proc_attribute;
+ procptr->next = NULL;
+
+ /* add record to end of linked list */
+ if (*proc_list == NULL)
+ *proc_list = procptr;
+ else {
+ tmpptr = *proc_list;
+ while (tmpptr->next != NULL)
+ tmpptr = tmpptr->next;
+ tmpptr->next = procptr;
+ }
+ }
+
+ act_proc_id = get_unaligned_be32(
+ &p[proc_table + (13 * act_proc_id) + 4]);
+ } while ((act_proc_id != 0) && (act_proc_id < proc_count));
+
+ return status;
+}
+
+int altera_init(struct altera_config *config, const struct firmware *fw)
+{
+ struct altera_state *astate = NULL;
+ struct altera_procinfo *proc_list = NULL;
+ struct altera_procinfo *procptr = NULL;
+ char *key = NULL;
+ char *value = NULL;
+ char *action_name = NULL;
+ char *description = NULL;
+ int exec_result = 0;
+ int exit_code = 0;
+ int format_version = 0;
+ int action_count = 0;
+ int procedure_count = 0;
+ int index = 0;
+ s32 offset = 0L;
+ s32 error_address = 0L;
+ int retval = 0;
+
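+	/* NOTE keys are at most 32 characters and values at most 256, plus NUL */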
+ key = kzalloc(33, GFP_KERNEL);
+ if (!key) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ value = kzalloc(257, GFP_KERNEL);
+ if (!value) {
+ retval = -ENOMEM;
+ goto free_key;
+ }
+ astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL);
+ if (!astate) {
+ retval = -ENOMEM;
+ goto free_value;
+ }
+
+ astate->config = config;
+ if (!astate->config->jtag_io) {
+ dprintk(KERN_INFO "%s: using byteblaster!\n", __func__);
+ astate->config->jtag_io = netup_jtag_io_lpt;
+ }
+
+ altera_check_crc((u8 *)fw->data, fw->size);
+
+ if (debug) {
+ altera_get_file_info((u8 *)fw->data, fw->size, &format_version,
+ &action_count, &procedure_count);
+ printk(KERN_INFO "%s: File format is %s ByteCode format\n",
+ __func__, (format_version == 2) ? "Jam STAPL" :
+ "pre-standardized Jam 1.1");
+ while (altera_get_note((u8 *)fw->data, fw->size,
+ &offset, key, value, 256) == 0)
+ printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
+ __func__, key, value);
+ }
+
+ if (debug && (format_version == 2) && (action_count > 0)) {
+ printk(KERN_INFO "%s: Actions available:\n", __func__);
+ for (index = 0; index < action_count; ++index) {
+ altera_get_act_info((u8 *)fw->data, fw->size,
+ index, &action_name,
+ &description,
+ &proc_list);
+
+ if (description == NULL)
+ printk(KERN_INFO "%s: %s\n",
+ __func__,
+ action_name);
+ else
+ printk(KERN_INFO "%s: %s \"%s\"\n",
+ __func__,
+ action_name,
+ description);
+
+ procptr = proc_list;
+ while (procptr != NULL) {
+ if (procptr->attrs != 0)
+ printk(KERN_INFO "%s: %s (%s)\n",
+ __func__,
+ procptr->name,
+ (procptr->attrs == 1) ?
+ "optional" : "recommended");
+
+ proc_list = procptr->next;
+ kfree(procptr);
+ procptr = proc_list;
+ }
+ }
+
+ printk(KERN_INFO "\n");
+ }
+
+ exec_result = altera_execute(astate, (u8 *)fw->data, fw->size,
+ &error_address, &exit_code, &format_version);
+
+ if (exit_code)
+ exec_result = -EREMOTEIO;
+
+ if ((format_version == 2) && (exec_result == -EINVAL)) {
+ if (astate->config->action == NULL)
+ printk(KERN_ERR "%s: error: no action specified for "
+ "Jam STAPL file.\nprogram terminated.\n",
+ __func__);
+ else
+ printk(KERN_ERR "%s: error: action \"%s\""
+ " is not supported "
+ "for this Jam STAPL file.\n"
+ "Program terminated.\n", __func__,
+ astate->config->action);
+
+ } else if (exec_result)
+ printk(KERN_ERR "%s: error %d\n", __func__, exec_result);
+
+ kfree(astate);
+free_value:
+ kfree(value);
+free_key:
+ kfree(key);
+out:
+ return retval;
+}
+EXPORT_SYMBOL(altera_init);
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
new file mode 100644
index 00000000..0314773f
--- /dev/null
+++ b/drivers/misc/apds9802als.c
@@ -0,0 +1,339 @@
+/*
+ * apds9802als.c - apds9802 ALS Driver
+ *
+ * Copyright (C) 2009 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+#define ALS_MIN_RANGE_VAL 1
+#define ALS_MAX_RANGE_VAL 2
+#define POWER_STA_ENABLE 1
+#define POWER_STA_DISABLE 0
+
+#define DRIVER_NAME "apds9802als"
+
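+/*
+ * Register usage, inferred from the accesses below (informal names,
+ * not taken from a datasheet):
+ *	0x80	control: bit 0 powers the sensor on or off
+ *	0x81	mode: bit 0 selects the sensing range, bit 3 starts a measurement
+ *	0x86	status: bit 7 signals that ALS data is ready
+ *	0x8c/0x8d	ALS result, LSB/MSB
+ */
+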
+struct als_data {
+ struct mutex mutex;
+};
+
+static ssize_t als_sensing_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, 0x81);
+ if (val < 0)
+ return val;
+ if (val & 1)
+ return sprintf(buf, "4095\n");
+ else
+ return sprintf(buf, "65535\n");
+}
+
+static int als_wait_for_data_ready(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+ int retry = 10;
+
+ do {
+ msleep(30);
+ ret = i2c_smbus_read_byte_data(client, 0x86);
+ } while (!(ret & 0x80) && retry--);
+
+	if (retry < 0) {
+ dev_warn(dev, "timeout waiting for data ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static ssize_t als_lux0_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ int temp;
+
+ /* Protect against parallel reads */
+ pm_runtime_get_sync(dev);
+ mutex_lock(&data->mutex);
+
+ /* clear EOC interrupt status */
+ i2c_smbus_write_byte(client, 0x40);
+ /* start measurement */
+ temp = i2c_smbus_read_byte_data(client, 0x81);
+ i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
+
+ ret_val = als_wait_for_data_ready(dev);
+ if (ret_val < 0)
+ goto failed;
+
+ temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
+ if (temp < 0) {
+ ret_val = temp;
+ goto failed;
+ }
+ ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
+ if (ret_val < 0)
+ goto failed;
+
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+
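+	/* combine MSB and LSB into the 16-bit ALS count */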
+ temp = (ret_val << 8) | temp;
+ return sprintf(buf, "%d\n", temp);
+failed:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val < 4096)
+ val = 1;
+ else if (val < 65536)
+ val = 2;
+ else
+ return -ERANGE;
+
+ pm_runtime_get_sync(dev);
+
+ /* Make sure nobody else reads/modifies/writes 0x81 while we
+ are active */
+ mutex_lock(&data->mutex);
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x81);
+ if (ret_val < 0)
+ goto fail;
+
+ /* Reset the bits before setting them */
+ ret_val = ret_val & 0xFA;
+
+ if (val == 1) /* Setting detection range up to 4k LUX */
+ ret_val = (ret_val | 0x01);
+ else /* Setting detection range up to 64k LUX*/
+ ret_val = (ret_val | 0x00);
+
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
+
+ if (ret_val >= 0) {
+ /* All OK */
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return count;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static int als_set_power_state(struct i2c_client *client, bool on_off)
+{
+ int ret_val;
+ struct als_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->mutex);
+ ret_val = i2c_smbus_read_byte_data(client, 0x80);
+ if (ret_val < 0)
+ goto fail;
+ if (on_off)
+ ret_val = ret_val | 0x01;
+ else
+ ret_val = ret_val & 0xFE;
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
+fail:
+ mutex_unlock(&data->mutex);
+ return ret_val;
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static struct attribute_group m_als_gr = {
+ .name = "apds9802als",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int ret_val;
+ /* Write the command and then switch on */
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
+ if (ret_val < 0) {
+ dev_err(&client->dev, "failed default switch on write\n");
+ return ret_val;
+ }
+	/* detection range: 1~64K Lux, manual measurement */
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
+ if (ret_val < 0)
+ dev_err(&client->dev, "failed default LUX on write\n");
+
+ /* We always get 0 for the 1st measurement after system power on,
+ * so make sure it is finished before user asks for data.
+ */
+ als_wait_for_data_ready(&client->dev);
+
+ return ret_val;
+}
+
+static int apds9802als_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct als_data *data;
+
+ data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, data);
+ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+ if (res) {
+ dev_err(&client->dev, "device create file failed\n");
+ goto als_error1;
+ }
+ dev_info(&client->dev, "ALS chip found\n");
+ als_set_default_config(client);
+ mutex_init(&data->mutex);
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ return res;
+als_error1:
+ kfree(data);
+ return res;
+}
+
+static int __devexit apds9802als_remove(struct i2c_client *client)
+{
+ struct als_data *data = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(&client->dev);
+
+ als_set_power_state(client, false);
+ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ kfree(data);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ als_set_power_state(client, false);
+ return 0;
+}
+
+static int apds9802als_resume(struct i2c_client *client)
+{
+ als_set_default_config(client);
+ return 0;
+}
+
+static int apds9802als_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ als_set_power_state(client, false);
+ return 0;
+}
+
+static int apds9802als_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ als_set_power_state(client, true);
+ return 0;
+}
+
+static const struct dev_pm_ops apds9802als_pm_ops = {
+ .runtime_suspend = apds9802als_runtime_suspend,
+ .runtime_resume = apds9802als_runtime_resume,
+};
+
+#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops)
+
+#else /* CONFIG_PM */
+#define apds9802als_suspend NULL
+#define apds9802als_resume NULL
+#define APDS9802ALS_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id apds9802als_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, apds9802als_id);
+
+static struct i2c_driver apds9802als_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = APDS9802ALS_PM_OPS,
+ },
+ .probe = apds9802als_probe,
+ .remove = __devexit_p(apds9802als_remove),
+ .suspend = apds9802als_suspend,
+ .resume = apds9802als_resume,
+ .id_table = apds9802als_id,
+};
+
+module_i2c_driver(apds9802als_driver);
+
+MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com>");
+MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
new file mode 100644
index 00000000..ee74244a
--- /dev/null
+++ b/drivers/misc/apds990x.c
@@ -0,0 +1,1286 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/i2c/apds990x.h>
+
+/* Register map */
+#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */
+#define APDS990X_ATIME 0x01 /* ALS ADC time */
+#define APDS990X_PTIME 0x02 /* Proximity ADC time */
+#define APDS990X_WTIME 0x03 /* Wait time */
+#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */
+#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */
+#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */
+#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */
+#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */
+#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */
+#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */
+#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */
+#define APDS990X_PERS 0x0c /* Interrupt persistence filters */
+#define APDS990X_CONFIG 0x0d /* Configuration */
+#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */
+#define APDS990X_CONTROL 0x0f /* Gain control register */
+#define APDS990X_REV 0x11 /* Revision Number */
+#define APDS990X_ID 0x12 /* Device ID */
+#define APDS990X_STATUS 0x13 /* Device status */
+#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */
+#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */
+#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */
+#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */
+#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */
+#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */
+
+/* Control */
+#define APDS990X_MAX_AGAIN 3
+
+/* Enable register */
+#define APDS990X_EN_PIEN (0x1 << 5)
+#define APDS990X_EN_AIEN (0x1 << 4)
+#define APDS990X_EN_WEN (0x1 << 3)
+#define APDS990X_EN_PEN (0x1 << 2)
+#define APDS990X_EN_AEN (0x1 << 1)
+#define APDS990X_EN_PON (0x1 << 0)
+#define APDS990X_EN_DISABLE_ALL 0
+
+/* Status register */
+#define APDS990X_ST_PINT (0x1 << 5)
+#define APDS990X_ST_AINT (0x1 << 4)
+
+/* I2C access types */
+#define APDS990x_CMD_TYPE_MASK (0x03 << 5)
+#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */
+#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */
+#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */
+
+#define APDS990x_ADDR_SHIFT 0
+#define APDS990x_CMD 0x80
+
+/* Interrupt ack commands */
+#define APDS990X_INT_ACK_ALS 0x6
+#define APDS990X_INT_ACK_PS 0x5
+#define APDS990X_INT_ACK_BOTH 0x7
+
+/* ptime */
+#define APDS990X_PTIME_DEFAULT 0xff /* Recommended conversion time 2.7 ms */
+
+/* wtime */
+#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */
+
+#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */
+
+/* Persistence */
+#define APDS990X_APERS_SHIFT 0
+#define APDS990X_PPERS_SHIFT 4
+
+/* Supported ID:s */
+#define APDS990X_ID_0 0x0
+#define APDS990X_ID_4 0x4
+#define APDS990X_ID_29 0x29
+
+/* pgain and pdiode settings */
+#define APDS_PGAIN_1X 0x0
+#define APDS_PDIODE_IR 0x2
+
+#define APDS990X_LUX_OUTPUT_SCALE 10
+
+/* Reverse chip factors for threshold calculation */
+struct reverse_factors {
+ u32 afactor;
+ int cf1;
+ int irf1;
+ int cf2;
+ int irf2;
+};
+
+struct apds990x_chip {
+ struct apds990x_platform_data *pdata;
+ struct i2c_client *client;
+ struct mutex mutex; /* avoid parallel access */
+ struct regulator_bulk_data regs[2];
+ wait_queue_head_t wait;
+
+ int prox_en;
+ bool prox_continuous_mode;
+ bool lux_wait_fresh_res;
+
+ /* Chip parameters */
+ struct apds990x_chip_factors cf;
+ struct reverse_factors rcf;
+ u16 atime; /* als integration time */
+ u16 arate; /* als reporting rate */
+ u16 a_max_result; /* Max possible ADC value with current atime */
+ u8 again_meas; /* Gain used in last measurement */
+ u8 again_next; /* Next calculated gain */
+ u8 pgain;
+ u8 pdiode;
+ u8 pdrive;
+ u8 lux_persistence;
+ u8 prox_persistence;
+
+ u32 lux_raw;
+ u32 lux;
+ u16 lux_clear;
+ u16 lux_ir;
+ u16 lux_calib;
+ u32 lux_thres_hi;
+ u32 lux_thres_lo;
+
+ u32 prox_thres;
+ u16 prox_data;
+ u16 prox_calib;
+
+ char chipname[10];
+ u8 revision;
+};
+
+#define APDS_CALIB_SCALER 8192
+#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
+#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
+
+#define APDS_PROX_DEF_THRES 600
+#define APDS_PROX_HYSTERESIS 50
+#define APDS_LUX_DEF_THRES_HI 101
+#define APDS_LUX_DEF_THRES_LO 100
+#define APDS_DEFAULT_PROX_PERS 1
+
+#define APDS_TIMEOUT 2000
+#define APDS_STARTUP_DELAY 25000 /* us */
+#define APDS_RANGE 65535
+#define APDS_PROX_RANGE 1023
+#define APDS_LUX_GAIN_LO_LIMIT 100
+#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25
+
+#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */
+#define TIME_STEP_SCALER 32
+
+#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */
+#define APDS_LUX_DEFAULT_RATE 200
+
+static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
+static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
+
+/* The following two tables must match, i.e. a 10 Hz rate implies a persistence value of 1 */
+static const u16 arates_hz[] = {10, 5, 2, 1};
+static const u8 apersis[] = {1, 2, 4, 5};
+
+/* Regulators */
+static const char reg_vcc[] = "Vdd";
+static const char reg_vled[] = "Vled";
+
+static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ *data = ret;
+ return (int)ret;
+}
+
+static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ *data = ret;
+ return (int)ret;
+}
+
+static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ return (int)ret;
+}
+
+static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+ ret = i2c_smbus_write_word_data(client, reg, data);
+ return (int)ret;
+}
+
+static int apds990x_mode_on(struct apds990x_chip *chip)
+{
+ /* ALS is mandatory, proximity optional */
+ u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN |
+ APDS990X_EN_WEN;
+
+ if (chip->prox_en)
+ reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN;
+
+ return apds990x_write_byte(chip, APDS990X_ENABLE, reg);
+}
+
+static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
+{
+ u32 thres;
+ u32 cpl;
+ u32 ir;
+
+ if (lux == 0)
+ return 0;
+ else if (lux == APDS_RANGE)
+ return APDS_RANGE;
+
+ /*
+ * Reported LUX value is a combination of the IR and CLEAR channel
+ * values. However, interrupt threshold is only for clear channel.
+ * This function approximates needed HW threshold value for a given
+	 * LUX value in the current lighting type.
+ * IR level compared to visible light varies heavily depending on the
+ * source of the light
+ *
+ * Calculate threshold value for the next measurement period.
+ * Math: threshold = lux * cpl where
+ * cpl = atime * again / (glass_attenuation * device_factor)
+ * (count-per-lux)
+ *
+ * First remove calibration. Division by four is to avoid overflow
+ */
+ lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
+
+ /* Multiplication by 64 is to increase accuracy */
+ cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
+ APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
+
+ thres = lux * cpl / 64;
+ /*
+ * Convert IR light from the latest result to match with
+ * new gain step. This helps to adapt with the current
+ * source of light.
+ */
+ ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
+ (u32)again[chip->again_meas];
+
+ /*
+ * Compensate count with IR light impact
+ * IAC1 > IAC2 (see apds990x_get_lux for formulas)
+ */
+ if (chip->lux_clear * APDS_PARAM_SCALE >=
+ chip->rcf.afactor * chip->lux_ir)
+ thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
+ APDS_PARAM_SCALE;
+ else
+ thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
+ APDS_PARAM_SCALE;
+
+ if (thres >= chip->a_max_result)
+ thres = chip->a_max_result - 1;
+ return thres;
+}
+
+static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
+{
+ u8 reg_value;
+
+ chip->atime = time_ms;
+ /* Formula is specified in the data sheet */
+ reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
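+	/* e.g. the 50 ms averaging time used below gives 256 - (50 * 32) / 87 = 238 */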
+ /* Calculate max ADC value for given integration time */
+ chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
+ return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
+{
+ int ret, lo, hi;
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
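+	/*
+	 * Program a window around the threshold: the "near" trip point and
+	 * the "far" trip point are separated by APDS_PROX_HYSTERESIS counts.
+	 */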
+ if (data < chip->prox_thres) {
+ lo = 0;
+ hi = chip->prox_thres;
+ } else {
+ lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
+ if (chip->prox_continuous_mode)
+ hi = chip->prox_thres;
+ else
+ hi = APDS_RANGE;
+ }
+
+ ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
+ ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_athres(struct apds990x_chip *chip)
+{
+ int ret;
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ ret = apds990x_write_word(chip, APDS990X_AILTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
+ ret |= apds990x_write_word(chip, APDS990X_AIHTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_a_refresh(struct apds990x_chip *chip)
+{
+ /* This will force ALS interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
+ apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_p_refresh(struct apds990x_chip *chip)
+{
+ /* This will force proximity interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
+ apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
+}
+
+/* Called always with mutex locked */
+static int apds990x_calc_again(struct apds990x_chip *chip)
+{
+ int curr_again = chip->again_meas;
+ int next_again = chip->again_meas;
+ int ret = 0;
+
+ /* Calculate suitable als gain */
+ if (chip->lux_clear == chip->a_max_result)
+ next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
+ else if (chip->lux_clear > chip->a_max_result / 2)
+ next_again--;
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ next_again += 2; /* Too dark. Increase gain by 2 steps */
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
+ next_again++;
+
+ /* Limit gain to available range */
+ if (next_again < 0)
+ next_again = 0;
+ else if (next_again > APDS990X_MAX_AGAIN)
+ next_again = APDS990X_MAX_AGAIN;
+
+ /* Let's check can we trust the measured result */
+ if (chip->lux_clear == chip->a_max_result)
+ /* Result can be totally garbage due to saturation */
+ ret = -ERANGE;
+ else if (next_again != curr_again &&
+ chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ /*
+ * Gain is changed and measurement result is very small.
+ * Result can be totally garbage due to underflow
+ */
+ ret = -ERANGE;
+
+ chip->again_next = next_again;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+
+ /*
+ * Error means bad result -> re-measurement is needed. The forced
+ * refresh uses fastest possible persistence setting to get result
+ * as soon as possible.
+ */
+ if (ret < 0)
+ apds990x_force_a_refresh(chip);
+ else
+ apds990x_refresh_athres(chip);
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
+{
+ int iac, iac1, iac2; /* IR adjusted counts */
+ u32 lpc; /* Lux per count */
+
+ /* Formulas:
+ * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
+ * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
+ */
+ iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
+ iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
+
+ iac = max(iac1, iac2);
+ iac = max(iac, 0);
+
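+	/*
+	 * lux per count: output scale * device factor * glass attenuation,
+	 * divided by (gain * integration time)
+	 */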
+ lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
+ (u32)(again[chip->again_meas] * (u32)chip->atime);
+
+ return (iac * lpc) / APDS_PARAM_SCALE;
+}
+
+static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
+
+ switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
+ case APDS990X_ST_AINT:
+ reg |= APDS990X_INT_ACK_ALS;
+ break;
+ case APDS990X_ST_PINT:
+ reg |= APDS990X_INT_ACK_PS;
+ break;
+ default:
+ reg |= APDS990X_INT_ACK_BOTH;
+ break;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ return (int)ret;
+}
+
+static irqreturn_t apds990x_irq(int irq, void *data)
+{
+ struct apds990x_chip *chip = data;
+ u8 status;
+
+ apds990x_read_byte(chip, APDS990X_STATUS, &status);
+ apds990x_ack_int(chip, status);
+
+ mutex_lock(&chip->mutex);
+ if (!pm_runtime_suspended(&chip->client->dev)) {
+ if (status & APDS990X_ST_AINT) {
+ apds990x_read_word(chip, APDS990X_CDATAL,
+ &chip->lux_clear);
+ apds990x_read_word(chip, APDS990X_IRDATAL,
+ &chip->lux_ir);
+ /* Store used gain for calculations */
+ chip->again_meas = chip->again_next;
+
+ chip->lux_raw = apds990x_get_lux(chip,
+ chip->lux_clear,
+ chip->lux_ir);
+
+ if (apds990x_calc_again(chip) == 0) {
+ /* Result is valid */
+ chip->lux = chip->lux_raw;
+ chip->lux_wait_fresh_res = false;
+ wake_up(&chip->wait);
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "lux0_input");
+ }
+ }
+
+ if ((status & APDS990X_ST_PINT) && chip->prox_en) {
+ u16 clr_ch;
+
+ apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
+ /*
+ * If ALS channel is saturated at min gain,
+			 * proximity gives false positive values.
+ * Just ignore them.
+ */
+ if (chip->again_meas == 0 &&
+ clr_ch == chip->a_max_result)
+ chip->prox_data = 0;
+ else
+ apds990x_read_word(chip,
+ APDS990X_PDATAL,
+ &chip->prox_data);
+
+ apds990x_refresh_pthres(chip, chip->prox_data);
+ if (chip->prox_data < chip->prox_thres)
+ chip->prox_data = 0;
+ else if (!chip->prox_continuous_mode)
+ chip->prox_data = APDS_PROX_RANGE;
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "prox0_raw");
+ }
+ }
+ mutex_unlock(&chip->mutex);
+ return IRQ_HANDLED;
+}
+
+static int apds990x_configure(struct apds990x_chip *chip)
+{
+ /* It is recommended to use disabled mode during these operations */
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+
+	/* conversion and wait times for different state machine states */
+ apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
+ apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
+ apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
+
+ apds990x_write_byte(chip, APDS990X_CONFIG, 0);
+
+ /* Persistence levels */
+ apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+
+ apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
+
+ /* Start with relatively small gain */
+ chip->again_meas = 1;
+ chip->again_next = 1;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+ return 0;
+}
+
+static int apds990x_detect(struct apds990x_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+ u8 id;
+
+ ret = apds990x_read_byte(chip, APDS990X_ID, &id);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID read failed\n");
+ return ret;
+ }
+
+ ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
+ if (ret < 0) {
+ dev_err(&client->dev, "REV read failed\n");
+ return ret;
+ }
+
+ switch (id) {
+ case APDS990X_ID_0:
+ case APDS990X_ID_4:
+ case APDS990X_ID_29:
+ snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
+static int apds990x_chip_on(struct apds990x_chip *chip)
+{
+ int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0)
+ return err;
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+ /* Refresh all configs in case of regulators were off */
+ chip->prox_data = 0;
+ apds990x_configure(chip);
+ apds990x_mode_on(chip);
+ return 0;
+}
+#endif
+
+static int apds990x_chip_off(struct apds990x_chip *chip)
+{
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return 0;
+}
+
+static ssize_t apds990x_lux_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 result;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO;
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_fresh_res,
+ msecs_to_jiffies(APDS_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
+ if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
+ result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
+
+ ret = sprintf(buf, "%d.%d\n",
+ result / APDS990X_LUX_OUTPUT_SCALE,
+ result % APDS990X_LUX_OUTPUT_SCALE);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL);
+
+static ssize_t apds990x_lux_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_RANGE);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL);
+
+static ssize_t apds990x_lux_calib_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_CALIB_SCALER);
+}
+
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+ apds990x_lux_calib_format_show, NULL);
+
+static ssize_t apds990x_lux_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->lux_calib);
+}
+
+static ssize_t apds990x_lux_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+	if (value > APDS_RANGE)
+ return -EINVAL;
+
+ chip->lux_calib = value;
+
+ return len;
+}
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show,
+ apds990x_lux_calib_store);
+
+static ssize_t apds990x_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", arates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t apds990x_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->arate);
+}
+
+static int apds990x_set_arate(struct apds990x_chip *chip, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+ if (rate >= arates_hz[i])
+ break;
+
+ if (i == ARRAY_SIZE(arates_hz))
+ return -EINVAL;
+
+ /* Pick up corresponding persistence value */
+ chip->lux_persistence = apersis[i];
+ chip->arate = arates_hz[i];
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /* Persistence levels */
+ return apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+}
+
+static ssize_t apds990x_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ ret = apds990x_set_arate(chip, value);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show,
+ apds990x_rate_store);
+
+static ssize_t apds990x_prox_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ if (pm_runtime_suspended(dev) || !chip->prox_en)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ ret = sprintf(buf, "%d\n", chip->prox_data);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL);
+
+static ssize_t apds990x_prox_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_PROX_RANGE);
+}
+
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
+
+static ssize_t apds990x_prox_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_en);
+}
+
+static ssize_t apds990x_prox_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+
+ if (!chip->prox_en)
+ chip->prox_data = 0;
+
+ if (value)
+ chip->prox_en++;
+ else if (chip->prox_en > 0)
+ chip->prox_en--;
+
+ if (!pm_runtime_suspended(dev))
+ apds990x_mode_on(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
+ apds990x_prox_enable_store);
+
+static const char reporting_modes[][9] = {"trigger", "periodic"};
+
+static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n",
+ reporting_modes[!!chip->prox_continuous_mode]);
+}
+
+static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ if (sysfs_streq(buf, reporting_modes[0]))
+ chip->prox_continuous_mode = 0;
+ else if (sysfs_streq(buf, reporting_modes[1]))
+ chip->prox_continuous_mode = 1;
+ else
+ return -EINVAL;
+ return len;
+}
+
+static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
+ apds990x_prox_reporting_mode_show,
+ apds990x_prox_reporting_mode_store);
+
+static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
+}
+
+static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO,
+		   apds990x_prox_reporting_avail_show, NULL);
+
+
+static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_thres_hi);
+}
+
+static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_thres_lo);
+}
+
+static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
+ const char *buf)
+{
+ int ret = 0;
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ if (thresh > APDS_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ *target = thresh;
+ /*
+ * Don't update values in HW if we are still waiting for
+ * first interrupt to come after device handle open call.
+ */
+ if (!chip->lux_wait_fresh_res)
+ apds990x_refresh_athres(chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+
+}
+
+static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_above_show,
+ apds990x_lux_thresh_above_store);
+
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_below_show,
+ apds990x_lux_thresh_below_store);
+
+static ssize_t apds990x_prox_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_thres);
+}
+
+static ssize_t apds990x_prox_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if ((value > APDS_RANGE) || (value == 0) ||
+ (value < APDS_PROX_HYSTERESIS))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_thres = value;
+
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_prox_threshold_show,
+ apds990x_prox_threshold_store);
+
+static ssize_t apds990x_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t apds990x_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+ if (value) {
+ pm_runtime_get_sync(dev);
+ mutex_lock(&chip->mutex);
+ chip->lux_wait_fresh_res = true;
+ apds990x_force_a_refresh(chip);
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ } else {
+ if (!pm_runtime_suspended(dev))
+ pm_runtime_put(dev);
+ }
+ return len;
+}
+
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+ apds990x_power_state_show,
+ apds990x_power_state_store);
+
+static ssize_t apds990x_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_thresh_above_value.attr,
+ &dev_attr_prox0_reporting_mode.attr,
+ &dev_attr_prox0_reporting_mode_avail.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute_group apds990x_attribute_group[] = {
+ {.attrs = sysfs_attrs_ctrl },
+};
+
+static int __devinit apds990x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct apds990x_chip *chip;
+ int err;
+
+ chip = kzalloc(sizeof *chip, GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ init_waitqueue_head(&chip->wait);
+ mutex_init(&chip->mutex);
+ chip->pdata = client->dev.platform_data;
+
+ if (chip->pdata == NULL) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ err = -EINVAL;
+ goto fail1;
+ }
+
+ if (chip->pdata->cf.ga == 0) {
+ /* set uncovered sensor default parameters */
+ chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
+ chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
+ chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
+ chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
+ chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
+ chip->cf.df = 52;
+ } else {
+ chip->cf = chip->pdata->cf;
+ }
+
+ /* precalculate inverse chip factors for threshold control */
+ chip->rcf.afactor =
+ (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
+ (chip->cf.cf1 - chip->cf.cf2);
+ chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+ chip->cf.cf1;
+ chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
+ chip->cf.cf1;
+ chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+ chip->cf.cf2;
+ chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
+ chip->cf.cf2;
+
+ /* Set something to start with */
+ chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
+ chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
+ chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;
+
+ chip->prox_thres = APDS_PROX_DEF_THRES;
+ chip->pdrive = chip->pdata->pdrive;
+ chip->pdiode = APDS_PDIODE_IR;
+ chip->pgain = APDS_PGAIN_1X;
+ chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
+ chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
+ chip->prox_continuous_mode = false;
+
+ chip->regs[0].supply = reg_vcc;
+ chip->regs[1].supply = reg_vled;
+
+ err = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot get regulators\n");
+ goto fail1;
+ }
+
+ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot enable regulators\n");
+ goto fail2;
+ }
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+ err = apds990x_detect(chip);
+ if (err < 0) {
+ dev_err(&client->dev, "APDS990X not found\n");
+ goto fail3;
+ }
+
+ pm_runtime_set_active(&client->dev);
+
+ apds990x_configure(chip);
+ apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
+ apds990x_mode_on(chip);
+
+ pm_runtime_enable(&client->dev);
+
+ if (chip->pdata->setup_resources) {
+ err = chip->pdata->setup_resources();
+ if (err) {
+ err = -EINVAL;
+ goto fail3;
+ }
+ }
+
+ err = sysfs_create_group(&chip->client->dev.kobj,
+ apds990x_attribute_group);
+ if (err < 0) {
+ dev_err(&chip->client->dev, "Sysfs registration failed\n");
+ goto fail4;
+ }
+
+ err = request_threaded_irq(client->irq, NULL,
+ apds990x_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT,
+ "apds990x", chip);
+ if (err) {
+ dev_err(&client->dev, "could not get IRQ %d\n",
+ client->irq);
+ goto fail5;
+ }
+ return err;
+fail5:
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &apds990x_attribute_group[0]);
+fail4:
+ if (chip->pdata && chip->pdata->release_resources)
+ chip->pdata->release_resources();
+fail3:
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+ kfree(chip);
+ return err;
+}
+
+static int __devexit apds990x_remove(struct i2c_client *client)
+{
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ free_irq(client->irq, chip);
+ sysfs_remove_group(&chip->client->dev.kobj,
+ apds990x_attribute_group);
+
+ if (chip->pdata && chip->pdata->release_resources)
+ chip->pdata->release_resources();
+
+ if (!pm_runtime_suspended(&client->dev))
+ apds990x_chip_off(chip);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds990x_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_off(chip);
+ return 0;
+}
+
+static int apds990x_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ /*
+ * If we were enabled at suspend time, it is expected
+ * everything works nice and smoothly. Chip_on is enough
+ */
+ apds990x_chip_on(chip);
+
+ return 0;
+}
+#else
+#define apds990x_suspend NULL
+#define apds990x_resume NULL
+#define apds990x_shutdown NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int apds990x_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_off(chip);
+ return 0;
+}
+
+static int apds990x_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_on(chip);
+ return 0;
+}
+
+#endif
+
+static const struct i2c_device_id apds990x_id[] = {
+ {"apds990x", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, apds990x_id);
+
+static const struct dev_pm_ops apds990x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
+ SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
+ apds990x_runtime_resume,
+ NULL)
+};
+
+static struct i2c_driver apds990x_driver = {
+ .driver = {
+ .name = "apds990x",
+ .owner = THIS_MODULE,
+ .pm = &apds990x_pm_ops,
+ },
+ .probe = apds990x_probe,
+ .remove = __devexit_p(apds990x_remove),
+ .id_table = apds990x_id,
+};
+
+module_i2c_driver(apds990x_driver);
+
+MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
new file mode 100644
index 00000000..fe8616a8
--- /dev/null
+++ b/drivers/misc/arm-charlcd.c
@@ -0,0 +1,396 @@
+/*
+ * Driver for the on-board character LCD found on some ARM reference boards
+ * This is basically a Hitachi HD44780 LCD with a custom IP block to drive it
+ * http://en.wikipedia.org/wiki/HD44780_Character_LCD
+ * Currently it will just display the text "ARM Linux" and the Linux version
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Linus Walleij <triad@df.lth.se>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <generated/utsrelease.h>
+
+#define DRIVERNAME "arm-charlcd"
+#define CHARLCD_TIMEOUT (msecs_to_jiffies(1000))
+
+/* Offsets to registers */
+#define CHAR_COM 0x00U
+#define CHAR_DAT 0x04U
+#define CHAR_RD 0x08U
+#define CHAR_RAW 0x0CU
+#define CHAR_MASK 0x10U
+#define CHAR_STAT 0x14U
+
+#define CHAR_RAW_CLEAR 0x00000000U
+#define CHAR_RAW_VALID 0x00000100U
+
+/* Hitachi HD44780 display commands */
+#define HD_CLEAR 0x01U
+#define HD_HOME 0x02U
+#define HD_ENTRYMODE 0x04U
+#define HD_ENTRYMODE_INCREMENT 0x02U
+#define HD_ENTRYMODE_SHIFT 0x01U
+#define HD_DISPCTRL 0x08U
+#define HD_DISPCTRL_ON 0x04U
+#define HD_DISPCTRL_CURSOR_ON 0x02U
+#define HD_DISPCTRL_CURSOR_BLINK 0x01U
+#define HD_CRSR_SHIFT 0x10U
+#define HD_CRSR_SHIFT_DISPLAY 0x08U
+#define HD_CRSR_SHIFT_DISPLAY_RIGHT 0x04U
+#define HD_FUNCSET 0x20U
+#define HD_FUNCSET_8BIT 0x10U
+#define HD_FUNCSET_2_LINES 0x08U
+#define HD_FUNCSET_FONT_5X10 0x04U
+#define HD_SET_CGRAM 0x40U
+#define HD_SET_DDRAM 0x80U
+#define HD_BUSY_FLAG 0x80U
+
+/**
+ * struct charlcd - state container for the character LCD
+ * @dev: a pointer back to containing device
+ * @phybase: the offset to the controller in physical memory
+ * @physize: the size of the physical page
+ * @virtbase: the offset to the controller in virtual memory
+ * @irq: reserved interrupt number
+ * @complete: completion structure for the last LCD command
+ * @init_work: delayed work used to run the slow display init sequence
+ */
+struct charlcd {
+ struct device *dev;
+ u32 phybase;
+ u32 physize;
+ void __iomem *virtbase;
+ int irq;
+ struct completion complete;
+ struct delayed_work init_work;
+};
+
+static irqreturn_t charlcd_interrupt(int irq, void *data)
+{
+ struct charlcd *lcd = data;
+ u8 status;
+
+ status = readl(lcd->virtbase + CHAR_STAT) & 0x01;
+ /* Clear IRQ */
+ writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+ if (status)
+ complete(&lcd->complete);
+ else
+ dev_info(lcd->dev, "Spurious IRQ (%02x)\n", status);
+ return IRQ_HANDLED;
+}
+
+
+static void charlcd_wait_complete_irq(struct charlcd *lcd)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&lcd->complete,
+ CHARLCD_TIMEOUT);
+ /* Disable IRQ after completion */
+ writel(0x00, lcd->virtbase + CHAR_MASK);
+
+ if (ret < 0) {
+ dev_err(lcd->dev,
+ "wait_for_completion_interruptible_timeout() "
+ "returned %d waiting for ready\n", ret);
+ return;
+ }
+
+ if (ret == 0) {
+ dev_err(lcd->dev, "charlcd controller timed out "
+ "waiting for ready\n");
+ return;
+ }
+}
+
+static u8 charlcd_4bit_read_char(struct charlcd *lcd)
+{
+ u8 data;
+ u32 val;
+ int i;
+
+ /* If we can, use an IRQ to wait for the data, else poll */
+ if (lcd->irq >= 0)
+ charlcd_wait_complete_irq(lcd);
+ else {
+ i = 0;
+ val = 0;
+ while (!(val & CHAR_RAW_VALID) && i < 10) {
+ udelay(100);
+ val = readl(lcd->virtbase + CHAR_RAW);
+ i++;
+ }
+
+ writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+ }
+ msleep(1);
+
+ /* Read the 4 high bits of the data */
+ data = readl(lcd->virtbase + CHAR_RD) & 0xf0;
+
+ /*
+ * The second read for the low bits does not trigger an IRQ
+ * so in this case we have to poll for the 4 lower bits
+ */
+ i = 0;
+ val = 0;
+ while (!(val & CHAR_RAW_VALID) && i < 10) {
+ udelay(100);
+ val = readl(lcd->virtbase + CHAR_RAW);
+ i++;
+ }
+ writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+ msleep(1);
+
+ /* Read the 4 low bits of the data */
+ data |= (readl(lcd->virtbase + CHAR_RD) >> 4) & 0x0f;
+
+ return data;
+}
+
+static bool charlcd_4bit_read_bf(struct charlcd *lcd)
+{
+ if (lcd->irq >= 0) {
+ /*
+ * If we'll use IRQs to wait for the busyflag, clear any
+ * pending flag and enable IRQ
+ */
+ writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+ init_completion(&lcd->complete);
+ writel(0x01, lcd->virtbase + CHAR_MASK);
+ }
+ readl(lcd->virtbase + CHAR_COM);
+ return charlcd_4bit_read_char(lcd) & HD_BUSY_FLAG ? true : false;
+}
+
+static void charlcd_4bit_wait_busy(struct charlcd *lcd)
+{
+ int retries = 50;
+
+ udelay(100);
+ while (charlcd_4bit_read_bf(lcd) && retries)
+ retries--;
+ if (!retries)
+ dev_err(lcd->dev, "timeout waiting for busyflag\n");
+}
+
+static void charlcd_4bit_command(struct charlcd *lcd, u8 cmd)
+{
+ u32 cmdlo = (cmd << 4) & 0xf0;
+ u32 cmdhi = (cmd & 0xf0);
+
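+	/* 4-bit bus: send the high nibble of the command first, then the low nibble */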
+ writel(cmdhi, lcd->virtbase + CHAR_COM);
+ udelay(10);
+ writel(cmdlo, lcd->virtbase + CHAR_COM);
+ charlcd_4bit_wait_busy(lcd);
+}
+
+static void charlcd_4bit_char(struct charlcd *lcd, u8 ch)
+{
+ u32 chlo = (ch << 4) & 0xf0;
+ u32 chhi = (ch & 0xf0);
+
+ writel(chhi, lcd->virtbase + CHAR_DAT);
+ udelay(10);
+ writel(chlo, lcd->virtbase + CHAR_DAT);
+ charlcd_4bit_wait_busy(lcd);
+}
+
+static void charlcd_4bit_print(struct charlcd *lcd, int line, const char *str)
+{
+ u8 offset;
+ int i;
+
+	/*
+	 * We support lines 0 and 1:
+	 * line 0 runs from DDRAM address 0x00..0x27,
+	 * line 1 runs from 0x28..0x4f
+	 */
+ if (line == 0)
+ offset = 0;
+ else if (line == 1)
+ offset = 0x28;
+ else
+ return;
+
+ /* Set offset */
+ charlcd_4bit_command(lcd, HD_SET_DDRAM | offset);
+
+ /* Send string */
+ for (i = 0; i < strlen(str) && i < 0x28; i++)
+ charlcd_4bit_char(lcd, str[i]);
+}
+
+static void charlcd_4bit_init(struct charlcd *lcd)
+{
+ /* These commands cannot be checked with the busy flag */
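+	/*
+	 * HD44780 "initialization by instruction": the 8-bit function set
+	 * command is issued three times before dropping to 4-bit mode
+	 */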
+ writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
+ msleep(5);
+ writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
+ udelay(100);
+ writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
+ udelay(100);
+ /* Go to 4bit mode */
+ writel(HD_FUNCSET, lcd->virtbase + CHAR_COM);
+ udelay(100);
+ /*
+ * 4bit mode, 2 lines, 5x8 font, after this the number of lines
+ * and the font cannot be changed until the next initialization sequence
+ */
+ charlcd_4bit_command(lcd, HD_FUNCSET | HD_FUNCSET_2_LINES);
+ charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
+ charlcd_4bit_command(lcd, HD_ENTRYMODE | HD_ENTRYMODE_INCREMENT);
+ charlcd_4bit_command(lcd, HD_CLEAR);
+ charlcd_4bit_command(lcd, HD_HOME);
+ /* Put something useful in the display */
+ charlcd_4bit_print(lcd, 0, "ARM Linux");
+ charlcd_4bit_print(lcd, 1, UTS_RELEASE);
+}
+
+static void charlcd_init_work(struct work_struct *work)
+{
+ struct charlcd *lcd =
+ container_of(work, struct charlcd, init_work.work);
+
+ charlcd_4bit_init(lcd);
+}
+
+static int __init charlcd_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct charlcd *lcd;
+ struct resource *res;
+
+ lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ lcd->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_resource;
+ }
+ lcd->phybase = res->start;
+ lcd->physize = resource_size(res);
+
+ if (request_mem_region(lcd->phybase, lcd->physize,
+ DRIVERNAME) == NULL) {
+ ret = -EBUSY;
+ goto out_no_memregion;
+ }
+
+ lcd->virtbase = ioremap(lcd->phybase, lcd->physize);
+ if (!lcd->virtbase) {
+ ret = -ENOMEM;
+ goto out_no_remap;
+ }
+
+ lcd->irq = platform_get_irq(pdev, 0);
+ /* If no IRQ is supplied, we'll survive without it */
+ if (lcd->irq >= 0) {
+ if (request_irq(lcd->irq, charlcd_interrupt, IRQF_DISABLED,
+ DRIVERNAME, lcd)) {
+ ret = -EIO;
+ goto out_no_irq;
+ }
+ }
+
+ platform_set_drvdata(pdev, lcd);
+
+ /*
+ * Initialize the display in a delayed work, because
+ * it is VERY slow and would slow down the boot of the system.
+ */
+ INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
+ schedule_delayed_work(&lcd->init_work, 0);
+
+ dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
+ lcd->phybase);
+
+ return 0;
+
+out_no_irq:
+ iounmap(lcd->virtbase);
+out_no_remap:
+ platform_set_drvdata(pdev, NULL);
+out_no_memregion:
+	release_mem_region(lcd->phybase, lcd->physize);
+out_no_resource:
+ kfree(lcd);
+ return ret;
+}
+
+static int __exit charlcd_remove(struct platform_device *pdev)
+{
+ struct charlcd *lcd = platform_get_drvdata(pdev);
+
+	if (lcd) {
+		if (lcd->irq >= 0)
+			free_irq(lcd->irq, lcd);
+ iounmap(lcd->virtbase);
+ release_mem_region(lcd->phybase, lcd->physize);
+ platform_set_drvdata(pdev, NULL);
+ kfree(lcd);
+ }
+
+ return 0;
+}
+
+static int charlcd_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct charlcd *lcd = platform_get_drvdata(pdev);
+
+ /* Power the display off */
+ charlcd_4bit_command(lcd, HD_DISPCTRL);
+ return 0;
+}
+
+static int charlcd_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct charlcd *lcd = platform_get_drvdata(pdev);
+
+ /* Turn the display back on */
+ charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
+ return 0;
+}
+
+static const struct dev_pm_ops charlcd_pm_ops = {
+ .suspend = charlcd_suspend,
+ .resume = charlcd_resume,
+};
+
+static struct platform_driver charlcd_driver = {
+ .driver = {
+ .name = DRIVERNAME,
+ .owner = THIS_MODULE,
+ .pm = &charlcd_pm_ops,
+ },
+ .remove = __exit_p(charlcd_remove),
+};
+
+static int __init charlcd_init(void)
+{
+ return platform_driver_probe(&charlcd_driver, charlcd_probe);
+}
+
+static void __exit charlcd_exit(void)
+{
+ platform_driver_unregister(&charlcd_driver);
+}
+
+module_init(charlcd_init);
+module_exit(charlcd_exit);
+
+MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
+MODULE_DESCRIPTION("ARM Character LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
new file mode 100644
index 00000000..5bb18778
--- /dev/null
+++ b/drivers/misc/atmel-ssc.c
@@ -0,0 +1,177 @@
+/*
+ * Atmel SSC driver
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/atmel-ssc.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/* Serialize access to ssc_list and user count */
+static DEFINE_SPINLOCK(user_lock);
+static LIST_HEAD(ssc_list);
+
+struct ssc_device *ssc_request(unsigned int ssc_num)
+{
+ int ssc_valid = 0;
+ struct ssc_device *ssc;
+
+ spin_lock(&user_lock);
+ list_for_each_entry(ssc, &ssc_list, list) {
+ if (ssc->pdev->id == ssc_num) {
+ ssc_valid = 1;
+ break;
+ }
+ }
+
+ if (!ssc_valid) {
+ spin_unlock(&user_lock);
+ pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (ssc->user) {
+ spin_unlock(&user_lock);
+ dev_dbg(&ssc->pdev->dev, "module busy\n");
+ return ERR_PTR(-EBUSY);
+ }
+ ssc->user++;
+ spin_unlock(&user_lock);
+
+ clk_enable(ssc->clk);
+
+ return ssc;
+}
+EXPORT_SYMBOL(ssc_request);
+
+void ssc_free(struct ssc_device *ssc)
+{
+ spin_lock(&user_lock);
+ if (ssc->user) {
+ ssc->user--;
+ clk_disable(ssc->clk);
+ } else {
+ dev_dbg(&ssc->pdev->dev, "device already free\n");
+ }
+ spin_unlock(&user_lock);
+}
+EXPORT_SYMBOL(ssc_free);
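+
+/*
+ * Usage sketch for the request/free pair above (hedged illustration,
+ * not part of this file): a client driver would typically do
+ *
+ *	struct ssc_device *ssc = ssc_request(0);
+ *	if (IS_ERR(ssc))
+ *		return PTR_ERR(ssc);
+ *	... program the SSC via ssc_readl()/ssc_writel() on ssc->regs ...
+ *	ssc_free(ssc);
+ *
+ * ssc_request() leaves pclk enabled, so the registers are immediately
+ * accessible; ssc_free() disables the clock again.
+ */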
+
+static int __init ssc_probe(struct platform_device *pdev)
+{
+ int retval = 0;
+ struct resource *regs;
+ struct ssc_device *ssc;
+
+ ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL);
+ if (!ssc) {
+ dev_dbg(&pdev->dev, "out of memory\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_dbg(&pdev->dev, "no mmio resource defined\n");
+ retval = -ENXIO;
+ goto out_free;
+ }
+
+ ssc->clk = clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(ssc->clk)) {
+ dev_dbg(&pdev->dev, "no pclk clock defined\n");
+ retval = -ENXIO;
+ goto out_free;
+ }
+
+ ssc->pdev = pdev;
+ ssc->regs = ioremap(regs->start, resource_size(regs));
+ if (!ssc->regs) {
+ dev_dbg(&pdev->dev, "ioremap failed\n");
+ retval = -EINVAL;
+ goto out_clk;
+ }
+
+ /* disable all interrupts */
+ clk_enable(ssc->clk);
+ ssc_writel(ssc->regs, IDR, ~0UL);
+ ssc_readl(ssc->regs, SR);
+ clk_disable(ssc->clk);
+
+	ssc->irq = platform_get_irq(pdev, 0);
+	if (ssc->irq < 0) {
+ dev_dbg(&pdev->dev, "could not get irq\n");
+ retval = -ENXIO;
+ goto out_unmap;
+ }
+
+ spin_lock(&user_lock);
+ list_add_tail(&ssc->list, &ssc_list);
+ spin_unlock(&user_lock);
+
+ platform_set_drvdata(pdev, ssc);
+
+ dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n",
+ ssc->regs, ssc->irq);
+
+ goto out;
+
+out_unmap:
+ iounmap(ssc->regs);
+out_clk:
+ clk_put(ssc->clk);
+out_free:
+ kfree(ssc);
+out:
+ return retval;
+}
+
+static int __devexit ssc_remove(struct platform_device *pdev)
+{
+ struct ssc_device *ssc = platform_get_drvdata(pdev);
+
+ spin_lock(&user_lock);
+ iounmap(ssc->regs);
+ clk_put(ssc->clk);
+ list_del(&ssc->list);
+ kfree(ssc);
+ spin_unlock(&user_lock);
+
+ return 0;
+}
+
+static struct platform_driver ssc_driver = {
+ .remove = __devexit_p(ssc_remove),
+ .driver = {
+ .name = "ssc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ssc_init(void)
+{
+ return platform_driver_probe(&ssc_driver, ssc_probe);
+}
+module_init(ssc_init);
+
+static void __exit ssc_exit(void)
+{
+ platform_driver_unregister(&ssc_driver);
+}
+module_exit(ssc_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ssc");
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
new file mode 100644
index 00000000..28f5aaa1
--- /dev/null
+++ b/drivers/misc/atmel_pwm.c
@@ -0,0 +1,410 @@
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/atmel_pwm.h>
+
+
+/*
+ * This is a simple driver for the PWM controller found in various newer
+ * Atmel SoCs, including the AVR32 series and the AT91sam9263.
+ *
+ * Chips with current Linux ports have only 4 PWM channels, out of a
+ * maximum of 32. AT32UC3A and AT32UC3B chips have 7 channels (but
+ * currently no Linux port). Docs are inconsistent about the width of
+ * the channel counter registers; it's at least 16 bits, but several
+ * places say 20 bits.
+ */
+#define PWM_NCHAN 4 /* max 32 */
+
+struct pwm {
+ spinlock_t lock;
+ struct platform_device *pdev;
+ u32 mask;
+ int irq;
+ void __iomem *base;
+ struct clk *clk;
+ struct pwm_channel *channel[PWM_NCHAN];
+ void (*handler[PWM_NCHAN])(struct pwm_channel *);
+};
+
+
+/* global PWM controller registers */
+#define PWM_MR 0x00
+#define PWM_ENA 0x04
+#define PWM_DIS 0x08
+#define PWM_SR 0x0c
+#define PWM_IER 0x10
+#define PWM_IDR 0x14
+#define PWM_IMR 0x18
+#define PWM_ISR 0x1c
+
+static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val)
+{
+ __raw_writel(val, p->base + offset);
+}
+
+static inline u32 pwm_readl(const struct pwm *p, unsigned offset)
+{
+ return __raw_readl(p->base + offset);
+}
+
+static inline void __iomem *pwmc_regs(const struct pwm *p, int index)
+{
+ return p->base + 0x200 + index * 0x20;
+}
+
+static struct pwm *pwm;
+
+static void pwm_dumpregs(struct pwm_channel *ch, char *tag)
+{
+ struct device *dev = &pwm->pdev->dev;
+
+ dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n",
+ tag,
+ pwm_readl(pwm, PWM_MR),
+ pwm_readl(pwm, PWM_SR),
+ pwm_readl(pwm, PWM_IMR));
+ dev_dbg(dev,
+ "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n",
+ ch->index,
+ pwm_channel_readl(ch, PWM_CMR),
+ pwm_channel_readl(ch, PWM_CDTY),
+ pwm_channel_readl(ch, PWM_CPRD),
+ pwm_channel_readl(ch, PWM_CCNT));
+}
+
+
+/**
+ * pwm_channel_alloc - allocate an unused PWM channel
+ * @index: identifies the channel
+ * @ch: structure to be initialized
+ *
+ * Drivers allocate PWM channels according to the board's wiring, and
+ * matching board-specific setup code. Returns zero or negative errno.
+ */
+int pwm_channel_alloc(int index, struct pwm_channel *ch)
+{
+ unsigned long flags;
+ int status = 0;
+
+ /* insist on PWM init, with this signal pinned out */
+ if (!pwm || !(pwm->mask & 1 << index))
+ return -ENODEV;
+
+ if (index < 0 || index >= PWM_NCHAN || !ch)
+ return -EINVAL;
+ memset(ch, 0, sizeof *ch);
+
+ spin_lock_irqsave(&pwm->lock, flags);
+ if (pwm->channel[index])
+ status = -EBUSY;
+ else {
+ clk_enable(pwm->clk);
+
+ ch->regs = pwmc_regs(pwm, index);
+ ch->index = index;
+
+ /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */
+ ch->mck = clk_get_rate(pwm->clk);
+
+ pwm->channel[index] = ch;
+ pwm->handler[index] = NULL;
+
+ /* channel and irq are always disabled when we return */
+ pwm_writel(pwm, PWM_DIS, 1 << index);
+ pwm_writel(pwm, PWM_IDR, 1 << index);
+ }
+ spin_unlock_irqrestore(&pwm->lock, flags);
+ return status;
+}
+EXPORT_SYMBOL(pwm_channel_alloc);
+
+static int pwmcheck(struct pwm_channel *ch)
+{
+ int index;
+
+ if (!pwm)
+ return -ENODEV;
+ if (!ch)
+ return -EINVAL;
+ index = ch->index;
+ if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch)
+ return -EINVAL;
+
+ return index;
+}
+
+/**
+ * pwm_channel_free - release a previously allocated channel
+ * @ch: the channel being released
+ *
+ * The channel is completely shut down (counter and IRQ disabled),
+ * and made available for re-use. Returns zero, or negative errno.
+ */
+int pwm_channel_free(struct pwm_channel *ch)
+{
+ unsigned long flags;
+ int t;
+
+ spin_lock_irqsave(&pwm->lock, flags);
+ t = pwmcheck(ch);
+ if (t >= 0) {
+ pwm->channel[t] = NULL;
+ pwm->handler[t] = NULL;
+
+ /* channel and irq are always disabled when we return */
+ pwm_writel(pwm, PWM_DIS, 1 << t);
+ pwm_writel(pwm, PWM_IDR, 1 << t);
+
+ clk_disable(pwm->clk);
+ t = 0;
+ }
+ spin_unlock_irqrestore(&pwm->lock, flags);
+ return t;
+}
+EXPORT_SYMBOL(pwm_channel_free);
+
+int __pwm_channel_onoff(struct pwm_channel *ch, int enabled)
+{
+ unsigned long flags;
+ int t;
+
+ /* OMITTED FUNCTIONALITY: starting several channels in synch */
+
+ spin_lock_irqsave(&pwm->lock, flags);
+ t = pwmcheck(ch);
+ if (t >= 0) {
+ pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t);
+ t = 0;
+ pwm_dumpregs(ch, enabled ? "enable" : "disable");
+ }
+ spin_unlock_irqrestore(&pwm->lock, flags);
+
+ return t;
+}
+EXPORT_SYMBOL(__pwm_channel_onoff);
+
+/**
+ * pwm_clk_alloc - allocate and configure CLKA or CLKB
+ * @prescale: from 0..9, the power of two used to divide MCK
+ * @div: from 1..255, the linear divisor to use
+ *
+ * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated
+ * clock will run with a period of (2^prescale * div) / MCK, or twice as
+ * long if center aligned PWM output is used. The clock must later be
+ * deconfigured using pwm_clk_free().
+ */
+int pwm_clk_alloc(unsigned prescale, unsigned div)
+{
+ unsigned long flags;
+ u32 mr;
+ u32 val = (prescale << 8) | div;
+ int ret = -EBUSY;
+
+ if (prescale >= 10 || div == 0 || div > 255)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pwm->lock, flags);
+ mr = pwm_readl(pwm, PWM_MR);
+ if ((mr & 0xffff) == 0) {
+ mr |= val;
+ ret = PWM_CPR_CLKA;
+ } else if ((mr & (0xffff << 16)) == 0) {
+ mr |= val << 16;
+ ret = PWM_CPR_CLKB;
+ }
+ if (ret > 0)
+ pwm_writel(pwm, PWM_MR, mr);
+ spin_unlock_irqrestore(&pwm->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_clk_alloc);
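+
+/*
+ * Worked example for pwm_clk_alloc() (illustration; the MCK rate is an
+ * assumed value): prescale = 5 and div = 100 on a 132 MHz MCK gives a
+ * period of (2^5 * 100) / 132000000 s, roughly 24.2 us, or twice that
+ * with center-aligned output.
+ */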
+
+/**
+ * pwm_clk_free - deconfigure and release CLKA or CLKB
+ * @clk: PWM_CPR_CLKA or PWM_CPR_CLKB, as returned by pwm_clk_alloc()
+ *
+ * Reverses the effect of pwm_clk_alloc().
+ */
+void pwm_clk_free(unsigned clk)
+{
+ unsigned long flags;
+ u32 mr;
+
+ spin_lock_irqsave(&pwm->lock, flags);
+ mr = pwm_readl(pwm, PWM_MR);
+ if (clk == PWM_CPR_CLKA)
+ pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0));
+ if (clk == PWM_CPR_CLKB)
+ pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16));
+ spin_unlock_irqrestore(&pwm->lock, flags);
+}
+EXPORT_SYMBOL(pwm_clk_free);
+
+/**
+ * pwm_channel_handler - manage channel's IRQ handler
+ * @ch: the channel
+ * @handler: the handler to use, possibly NULL
+ *
+ * If the handler is non-null, the handler will be called after every
+ * period of this PWM channel. If the handler is null, this channel
+ * won't generate an IRQ.
+ */
+int pwm_channel_handler(struct pwm_channel *ch,
+ void (*handler)(struct pwm_channel *ch))
+{
+ unsigned long flags;
+ int t;
+
+ spin_lock_irqsave(&pwm->lock, flags);
+ t = pwmcheck(ch);
+ if (t >= 0) {
+ pwm->handler[t] = handler;
+ pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t);
+ t = 0;
+ }
+ spin_unlock_irqrestore(&pwm->lock, flags);
+
+ return t;
+}
+EXPORT_SYMBOL(pwm_channel_handler);
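+
+/*
+ * Putting the channel API together (hedged sketch, not part of this
+ * file; "my_period_handler" is a hypothetical callback):
+ *
+ *	struct pwm_channel ch;
+ *
+ *	if (pwm_channel_alloc(2, &ch) == 0) {
+ *		pwm_channel_handler(&ch, my_period_handler);
+ *		__pwm_channel_onoff(&ch, 1);
+ *	}
+ *
+ * The channel struct must stay alive while allocated, since the driver
+ * keeps a pointer to it; the handler then runs after every PWM period
+ * until it is replaced with NULL or the channel is freed.
+ */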
+
+static irqreturn_t pwm_irq(int id, void *_pwm)
+{
+ struct pwm *p = _pwm;
+ irqreturn_t handled = IRQ_NONE;
+ u32 irqstat;
+ int index;
+
+ spin_lock(&p->lock);
+
+ /* ack irqs, then handle them */
+ irqstat = pwm_readl(pwm, PWM_ISR);
+
+ while (irqstat) {
+ struct pwm_channel *ch;
+ void (*handler)(struct pwm_channel *ch);
+
+ index = ffs(irqstat) - 1;
+ irqstat &= ~(1 << index);
+ ch = pwm->channel[index];
+ handler = pwm->handler[index];
+ if (handler && ch) {
+ spin_unlock(&p->lock);
+ handler(ch);
+ spin_lock(&p->lock);
+ handled = IRQ_HANDLED;
+ }
+ }
+
+ spin_unlock(&p->lock);
+ return handled;
+}
+
+static int __init pwm_probe(struct platform_device *pdev)
+{
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+ u32 *mp = pdev->dev.platform_data;
+ struct pwm *p;
+ int status = -EIO;
+
+ if (pwm)
+ return -EBUSY;
+ if (!r || irq < 0 || !mp || !*mp)
+ return -ENODEV;
+ if (*mp & ~((1<<PWM_NCHAN)-1)) {
+ dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n",
+ *mp, PWM_NCHAN);
+ return -EINVAL;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ spin_lock_init(&p->lock);
+ p->pdev = pdev;
+ p->mask = *mp;
+ p->irq = irq;
+ p->base = ioremap(r->start, resource_size(r));
+ if (!p->base)
+ goto fail;
+ p->clk = clk_get(&pdev->dev, "pwm_clk");
+ if (IS_ERR(p->clk)) {
+ status = PTR_ERR(p->clk);
+ p->clk = NULL;
+ goto fail;
+ }
+
+ status = request_irq(irq, pwm_irq, 0, pdev->name, p);
+ if (status < 0)
+ goto fail;
+
+ pwm = p;
+ platform_set_drvdata(pdev, p);
+
+ return 0;
+
+fail:
+ if (p->clk)
+ clk_put(p->clk);
+ if (p->base)
+ iounmap(p->base);
+
+ kfree(p);
+ return status;
+}
+
+static int __exit pwm_remove(struct platform_device *pdev)
+{
+ struct pwm *p = platform_get_drvdata(pdev);
+
+ if (p != pwm)
+ return -EINVAL;
+
+ clk_enable(pwm->clk);
+ pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1);
+ pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1);
+ clk_disable(pwm->clk);
+
+ pwm = NULL;
+
+ free_irq(p->irq, p);
+ clk_put(p->clk);
+ iounmap(p->base);
+ kfree(p);
+
+ return 0;
+}
+
+static struct platform_driver atmel_pwm_driver = {
+ .driver = {
+ .name = "atmel_pwm",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(pwm_remove),
+
+ /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states;
+ * and all AT91sam9263 states, albeit at reduced clock rate if
+ * MCK becomes the slow clock (i.e. what Linux labels STR).
+ */
+};
+
+static int __init pwm_init(void)
+{
+ return platform_driver_probe(&atmel_pwm_driver, pwm_probe);
+}
+module_init(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ platform_driver_unregister(&atmel_pwm_driver);
+}
+module_exit(pwm_exit);
+
+MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel_pwm");
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
new file mode 100644
index 00000000..c8d8e38d
--- /dev/null
+++ b/drivers/misc/atmel_tclib.c
@@ -0,0 +1,207 @@
+#include <linux/atmel_tc.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/of.h>
+
+/*
+ * This is a thin library to solve the problem of how to portably allocate
+ * one of the TC blocks. For simplicity, it doesn't currently expect to
+ * share individual timers between different drivers.
+ */
+
+#if defined(CONFIG_AVR32)
+/* AVR32 has these divide PBB */
+const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, };
+EXPORT_SYMBOL(atmel_tc_divisors);
+
+#elif defined(CONFIG_ARCH_AT91)
+/* AT91 has these divide MCK */
+const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
+EXPORT_SYMBOL(atmel_tc_divisors);
+
+#endif
+
+static DEFINE_SPINLOCK(tc_list_lock);
+static LIST_HEAD(tc_list);
+
+/**
+ * atmel_tc_alloc - allocate a specified TC block
+ * @block: which block to allocate
+ * @name: name to be associated with the iomem resource
+ *
+ * Allocates the requested block if it is available. On success a
+ * pointer to a pre-initialized struct atmel_tc is returned, and the
+ * caller can access the registers directly through the "regs" field;
+ * NULL is returned when the block is unknown or already in use.
+ */
+struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name)
+{
+ struct atmel_tc *tc;
+ struct platform_device *pdev = NULL;
+ struct resource *r;
+ size_t size;
+
+ spin_lock(&tc_list_lock);
+ list_for_each_entry(tc, &tc_list, node) {
+ if (tc->pdev->dev.of_node) {
+ if (of_alias_get_id(tc->pdev->dev.of_node, "tcb")
+ == block) {
+ pdev = tc->pdev;
+ break;
+ }
+ } else if (tc->pdev->id == block) {
+ pdev = tc->pdev;
+ break;
+ }
+ }
+
+ if (!pdev || tc->iomem)
+ goto fail;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ goto fail;
+
+ size = resource_size(r);
+ r = request_mem_region(r->start, size, name);
+ if (!r)
+ goto fail;
+
+ tc->regs = ioremap(r->start, size);
+ if (!tc->regs)
+ goto fail_ioremap;
+
+ tc->iomem = r;
+
+out:
+ spin_unlock(&tc_list_lock);
+ return tc;
+
+fail_ioremap:
+ release_mem_region(r->start, size);
+fail:
+ tc = NULL;
+ goto out;
+}
+EXPORT_SYMBOL_GPL(atmel_tc_alloc);
+
+/**
+ * atmel_tc_free - release a specified TC block
+ * @tc: Timer/counter block that was returned by atmel_tc_alloc()
+ *
+ * This reverses the effect of atmel_tc_alloc(), unmapping the I/O
+ * registers, invalidating the resource returned by that routine and
+ * making the TC available to other drivers.
+ */
+void atmel_tc_free(struct atmel_tc *tc)
+{
+ spin_lock(&tc_list_lock);
+ if (tc->regs) {
+ iounmap(tc->regs);
+ release_mem_region(tc->iomem->start, resource_size(tc->iomem));
+ tc->regs = NULL;
+ tc->iomem = NULL;
+ }
+ spin_unlock(&tc_list_lock);
+}
+EXPORT_SYMBOL_GPL(atmel_tc_free);
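+
+/*
+ * Usage sketch for the alloc/free pair above (hedged illustration,
+ * assuming the ATMEL_TC_REG() accessor from <linux/atmel_tc.h>):
+ *
+ *	struct atmel_tc *tc = atmel_tc_alloc(0, "my-timer");
+ *	if (tc) {
+ *		u32 sr = __raw_readl(tc->regs + ATMEL_TC_REG(0, SR));
+ *		...
+ *		atmel_tc_free(tc);
+ *	}
+ *
+ * atmel_tc_alloc() returns NULL when the block is unknown or busy.
+ */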
+
+#if defined(CONFIG_OF)
+static struct atmel_tcb_config tcb_rm9200_config = {
+ .counter_width = 16,
+};
+
+static struct atmel_tcb_config tcb_sam9x5_config = {
+ .counter_width = 32,
+};
+
+static const struct of_device_id atmel_tcb_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-tcb",
+ .data = &tcb_rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9x5-tcb",
+ .data = &tcb_sam9x5_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids);
+#endif
+
+static int __init tc_probe(struct platform_device *pdev)
+{
+ struct atmel_tc *tc;
+ struct clk *clk;
+ int irq;
+
+ if (!platform_get_resource(pdev, IORESOURCE_MEM, 0))
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ tc = kzalloc(sizeof(struct atmel_tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->pdev = pdev;
+
+ clk = clk_get(&pdev->dev, "t0_clk");
+ if (IS_ERR(clk)) {
+ kfree(tc);
+ return -EINVAL;
+ }
+
+ /* Now take SoC information if available */
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node);
+ if (match)
+ tc->tcb_config = match->data;
+ }
+
+ tc->clk[0] = clk;
+ tc->clk[1] = clk_get(&pdev->dev, "t1_clk");
+ if (IS_ERR(tc->clk[1]))
+ tc->clk[1] = clk;
+ tc->clk[2] = clk_get(&pdev->dev, "t2_clk");
+ if (IS_ERR(tc->clk[2]))
+ tc->clk[2] = clk;
+
+ tc->irq[0] = irq;
+ tc->irq[1] = platform_get_irq(pdev, 1);
+ if (tc->irq[1] < 0)
+ tc->irq[1] = irq;
+ tc->irq[2] = platform_get_irq(pdev, 2);
+ if (tc->irq[2] < 0)
+ tc->irq[2] = irq;
+
+ spin_lock(&tc_list_lock);
+ list_add_tail(&tc->node, &tc_list);
+ spin_unlock(&tc_list_lock);
+
+ return 0;
+}
+
+static struct platform_driver tc_driver = {
+ .driver = {
+ .name = "atmel_tcb",
+ .of_match_table = of_match_ptr(atmel_tcb_dt_ids),
+ },
+};
+
+static int __init tc_init(void)
+{
+ return platform_driver_probe(&tc_driver, tc_probe);
+}
+arch_initcall(tc_init);
diff --git a/drivers/misc/bcm2079x-i2c.c b/drivers/misc/bcm2079x-i2c.c
new file mode 100644
index 00000000..34f15cdf
--- /dev/null
+++ b/drivers/misc/bcm2079x-i2c.c
@@ -0,0 +1,736 @@
+/*
+ * Copyright (C) 2012 Broadcom Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/version.h>
+
+//Platform specific
+#include <mach/gpio.h>
+#include <mach/wmt_iomux.h>
+#include <mach/hardware.h>
+#include <mach/wmt_pmc.h>
+
+//#include <mach/sys_config.h>
+//Platform specific
+#include <linux/nfc/bcm2079x.h>
+
+#define USE_WAKE_LOCK
+
+#ifdef USE_WAKE_LOCK
+#include <linux/wakelock.h>
+#endif
+
+#define TRUE 1
+#define FALSE 0
+#define STATE_HIGH 1
+#define STATE_LOW 0
+
+unsigned int enable;
+
+
+/* end of compile options */
+
+/* do not change below */
+#define MAX_BUFFER_SIZE 780
+
+ /* Read data */
+#define PACKET_HEADER_SIZE_NCI (4)
+#define PACKET_HEADER_SIZE_HCI (3)
+#define PACKET_TYPE_NCI (16)
+#define PACKET_TYPE_HCIEV (4)
+#define MAX_PACKET_SIZE (PACKET_HEADER_SIZE_NCI + 255)
+
+#define CLIENT_ADDR 0x77
+#define WMT_BCM2079X_I2C_CHANNEL 3
+struct i2c_client *bcm2079x_client;
+
+
+struct bcm2079x_dev {
+ wait_queue_head_t read_wq;
+ struct mutex read_mutex;
+ struct i2c_client *client;
+ struct miscdevice bcm2079x_device;
+ unsigned int wake_gpio;
+ unsigned int en_gpio;
+ unsigned int irq_gpio;
+ unsigned int irq_no;
+ bool irq_enabled;
+ spinlock_t irq_enabled_lock;
+ unsigned int error_write;
+ unsigned int error_read;
+ unsigned int count_read;
+ unsigned int count_irq;
+ int original_address;
+#ifdef USE_WAKE_LOCK
+ struct wake_lock wake_lock;
+#endif
+};
+
+#define BCM2079X_NAME "bcm2079x-i2c"
+#define VERSION "AMPAK-BCM2079X-DRIVER-VERSION-1.0.0"
+static int nfc_handle = -1;
+unsigned int bcm2079x_irq_handle(void *para);
+
+/*hang on i2c bus no.3*/
+static __u32 twi_id = 3;
+
+static struct bcm2079x_platform_data bcm2079x_pdata = {
+ .irq_gpio = WMT_PIN_GP62_WAKEUP3,//GPIOG(11), //NFC_IRQ, GPIOG(11)
+ .irq_no = IRQ_PMC_WAKEUP,
+ .en_gpio = WMT_PIN_GP62_SUSGPIO0,//GPIOM(5), //NFC_EN, GPIOM(5)
+ .wake_gpio = WMT_PIN_GP1_GPIO9,//GPIOG(10), //NFC_FIRM,
+};
+
+int bcm2079x_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+
+ if(twi_id == adapter->nr)
+ {
+ dev_info(&client->dev, "%s: Detected chip %s at adapter %d, address 0x%02x\n",\
+ __func__, BCM2079X_NAME, i2c_adapter_id(adapter), client->addr);
+ strlcpy(info->type, BCM2079X_NAME, I2C_NAME_SIZE);
+ info->platform_data = &bcm2079x_pdata;
+ return 0;
+ }else{
+ return -ENODEV;
+ }
+}
+
+static void bcm2079x_init_stat(struct bcm2079x_dev *bcm2079x_dev)
+{
+ bcm2079x_dev->error_write = 0;
+ bcm2079x_dev->error_read = 0;
+ bcm2079x_dev->count_read = 0;
+ bcm2079x_dev->count_irq = 0;
+}
+
+static void bcm2079x_disable_irq(struct bcm2079x_dev *bcm2079x_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcm2079x_dev->irq_enabled_lock, flags);
+ if (bcm2079x_dev->irq_enabled) {
+ pmc_disable_wakeup_isr(WKS_WK3);//disable_irq_nosync(bcm2079x_dev->client->irq);
+ bcm2079x_dev->irq_enabled = false;
+ }
+ spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
+}
+
+static void bcm2079x_enable_irq(struct bcm2079x_dev *bcm2079x_dev)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&bcm2079x_dev->irq_enabled_lock, flags);
+ if (!bcm2079x_dev->irq_enabled) {
+ bcm2079x_dev->irq_enabled = true;
+ pmc_enable_wakeup_isr(WKS_WK3,3);//enable_irq(bcm2079x_dev->client->irq);
+ }
+ spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
+}
+
+/*
+ * The alias address 0x79, when sent as a 7-bit address from the host
+ * processor, matches the first byte (highest 2 bits) of the default
+ * 10-bit client address (0x1FA) programmed into the bcm20791.
+ * Used together with the first byte (0xFA) of the byte sequence below,
+ * it can address the bcm20791 on a system that does not support 10-bit
+ * addressing and change the default address to 0x38.
+ * The new address can be changed via the CLIENT_ADDRESS below if 0x38
+ * conflicts with another device on the same i2c bus.
+ */
+#define ALIAS_ADDRESS 0x79
+
+static void set_client_addr(struct bcm2079x_dev *bcm2079x_dev, int addr)
+{
+ struct i2c_client *client = bcm2079x_dev->client;
+ client->addr = addr;
+ if (addr > 0x7F)
+ client->flags |= I2C_CLIENT_TEN;
+ else
+ client->flags &= ~I2C_CLIENT_TEN;
+
+	dev_info(&client->dev,
+		"Set client device address to (0x%04X) flag = %04x\n",
+		client->addr, client->flags);
+}
+
+static void change_client_addr(struct bcm2079x_dev *bcm2079x_dev, int addr)
+{
+ struct i2c_client *client;
+ int ret;
+ int i;
+ int offset = 1;
+ char addr_data[] = {
+ 0xFA, 0xF2, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x2A
+ };
+
+ client = bcm2079x_dev->client;
+ if ((client->flags & I2C_CLIENT_TEN) == I2C_CLIENT_TEN) {
+ client->addr = ALIAS_ADDRESS;
+ client->flags &= ~I2C_CLIENT_TEN;
+ offset = 0;
+ }
+
+ addr_data[5] = addr & 0xFF;
+ ret = 0;
+ for (i = 1; i < sizeof(addr_data) - 1; ++i)
+ ret += addr_data[i];
+ addr_data[sizeof(addr_data) - 1] = (ret & 0xFF);
+	dev_info(&client->dev,
+		"Change client device from (0x%04X) flag = "\
+		"%04x, addr_data[%zu] = %02x\n",
+		client->addr, client->flags, sizeof(addr_data) - 1,
+		addr_data[sizeof(addr_data) - 1]);
+ ret = i2c_master_send(client, addr_data+offset, sizeof(addr_data)-offset);
+ if (ret != sizeof(addr_data)-offset) {
+ client->addr = ALIAS_ADDRESS;
+ client->flags &= ~I2C_CLIENT_TEN;
+		dev_info(&client->dev,
+			"Change client device from (0x%04X) flag = "\
+			"%04x, addr_data[%zu] = %02x\n",
+			client->addr, client->flags, sizeof(addr_data) - 1,
+			addr_data[sizeof(addr_data) - 1]);
+ ret = i2c_master_send(client, addr_data, sizeof(addr_data));
+ }
+ client->addr = addr_data[5];
+
+	dev_info(&client->dev,
+		"Client device address changed to (0x%04X) flag = %04x, ret = %d\n",
+		client->addr, client->flags, ret);
+}
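+
+/*
+ * Worked example of the checksum above (illustration only): the last
+ * byte is the 8-bit sum of addr_data[1..8], i.e. 0xF2 plus the new
+ * address. For the default 0x38 that is 0xF2 + 0x38 = 0x12A, truncated
+ * to 0x2A, which matches the value pre-filled in addr_data[] above.
+ */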
+
+static irqreturn_t bcm2079x_dev_irq_handler(int irq, void *dev_id)
+{
+ struct bcm2079x_dev *bcm2079x_dev = dev_id;
+ unsigned long flags;
+ //unsigned int status_i;
+
+ //status_i = PMCIS_VAL;
+
+ //rmb();
+ if(bcm2079x_dev->client == bcm2079x_client){
+ if(enable){
+ #ifdef USE_WAKE_LOCK
+ int wakelockcnt = 0;
+ if(! (wakelockcnt = wake_lock_active(&bcm2079x_dev->wake_lock )))
+ {
+			printk("irq acquire wake lock\n");
+ wake_lock(&bcm2079x_dev->wake_lock);
+ }else
+ {
+ // printk("irq wake lock count = %d\n", wakelockcnt);
+ }
+ //printk("irq handler ( wake lock %d)...\n", wakelockcnt);
+ #endif
+
+ spin_lock_irqsave(&bcm2079x_dev->irq_enabled_lock, flags);
+ bcm2079x_dev->count_irq++;
+ spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
+ wake_up(&bcm2079x_dev->read_wq);
+ }
+ pmc_clear_intr_status(WKS_WK3);
+ return IRQ_HANDLED;
+ }
+ else
+ return IRQ_NONE;
+}
+
+static unsigned int bcm2079x_dev_poll(struct file *filp, poll_table *wait)
+{
+ struct bcm2079x_dev *bcm2079x_dev = filp->private_data;
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcm2079x_dev->irq_enabled_lock, flags);
+ //if(!gpio_get_value(bcm2079x_dev->irq_gpio) && (bcm2079x_dev->count_irq < 1) )
+ //Platform specific API
+ if(!__gpio_get_value(bcm2079x_dev->irq_gpio) && (bcm2079x_dev->count_irq < 1) )
+ {
+ spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
+ printk("poll wait, irq count %d, irq_gpio %d\n", bcm2079x_dev->count_irq, bcm2079x_dev->irq_gpio );
+ poll_wait(filp, &bcm2079x_dev->read_wq, wait);
+ }else
+ {
+ if (bcm2079x_dev->count_irq < 1)
+ bcm2079x_dev->count_irq = 1;
+
+ spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
+		printk("poll: data available, no more waiting\n");
+ return (POLLIN | POLLRDNORM);
+ }
+
+ spin_lock_irqsave(&bcm2079x_dev->irq_enabled_lock, flags);
+ if (bcm2079x_dev->count_irq > 0)
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
+
+ return mask;
+}
+
+static ssize_t bcm2079x_dev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct bcm2079x_dev *bcm2079x_dev = filp->private_data;
+ unsigned char tmp[MAX_BUFFER_SIZE];
+ int total, len, ret;
+
+ total = 0;
+ len = 0;
+
+ if (bcm2079x_dev->count_irq > 0)
+ bcm2079x_dev->count_irq--;
+
+ bcm2079x_dev->count_read++;
+ if (count > MAX_BUFFER_SIZE)
+ count = MAX_BUFFER_SIZE;
+
+ mutex_lock(&bcm2079x_dev->read_mutex);
+
+	/** Read the first 4 bytes; these include the length of the NCI or HCI packet.
+	 **/
+ ret = i2c_master_recv(bcm2079x_dev->client, tmp, 4);
+ if (ret == 4) {
+ total = ret;
+ /** First byte is the packet type
+ **/
+ switch(tmp[0]) {
+ case PACKET_TYPE_NCI:
+ len = tmp[PACKET_HEADER_SIZE_NCI-1];
+ break;
+
+ case PACKET_TYPE_HCIEV:
+ len = tmp[PACKET_HEADER_SIZE_HCI-1];
+ if (len == 0)
+ total--; /*Since payload is 0, decrement total size (from 4 to 3) */
+ else
+ len--; /*First byte of payload is in tmp[3] already */
+ break;
+
+ default:
+ len = 0; /*Unknown packet byte */
+ break;
+ } /* switch*/
+
+ /** make sure full packet fits in the buffer
+ **/
+ if (len > 0 && (len + total) <= count) {
+ /** read the remainder of the packet.
+ **/
+ ret = i2c_master_recv(bcm2079x_dev->client, tmp+total, len);
+ if (ret == len)
+ total += len;
+ } /* if */
+ } /* if */
+
+ mutex_unlock(&bcm2079x_dev->read_mutex);
+
+ if (total > count || copy_to_user(buf, tmp, total)) {
+ dev_err(&bcm2079x_dev->client->dev,
+ "failed to copy to user space, total = %d\n", total);
+ total = -EFAULT;
+ bcm2079x_dev->error_read++;
+ }
+
+ return total;
+}
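+
+/*
+ * Example of the header parsing above (hedged illustration): if the 4
+ * header bytes of an NCI read are { 0x10, x, x, 0x05 }, tmp[0] matches
+ * PACKET_TYPE_NCI, len = tmp[3] = 5 and five more payload bytes are
+ * read. For an HCI event { 0x04, x, 0x03, p0 }, len = tmp[2] - 1 = 2,
+ * since the first payload byte p0 already arrived in tmp[3].
+ */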
+
+static ssize_t bcm2079x_dev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct bcm2079x_dev *bcm2079x_dev = filp->private_data;
+ char tmp[MAX_BUFFER_SIZE];
+ int ret;
+
+ if (count > MAX_BUFFER_SIZE) {
+		dev_err(&bcm2079x_dev->client->dev, "write request too large\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(tmp, buf, count)) {
+ dev_err(&bcm2079x_dev->client->dev,
+ "failed to copy from user space\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&bcm2079x_dev->read_mutex);
+ /* Write data */
+
+ ret = i2c_master_send(bcm2079x_dev->client, tmp, count);
+ if (ret != count) {
+ if ((bcm2079x_dev->client->flags & I2C_CLIENT_TEN) != I2C_CLIENT_TEN && bcm2079x_dev->error_write == 0) {
+ set_client_addr(bcm2079x_dev, 0x1FA);
+ ret = i2c_master_send(bcm2079x_dev->client, tmp, count);
+ if (ret != count)
+ bcm2079x_dev->error_write++;
+ set_client_addr(bcm2079x_dev, bcm2079x_dev->original_address);
+ } else {
+ dev_err(&bcm2079x_dev->client->dev,
+ "failed to write %d\n", ret);
+ ret = -EIO;
+ bcm2079x_dev->error_write++;
+ }
+ }
+ mutex_unlock(&bcm2079x_dev->read_mutex);
+
+ return ret;
+}
+
+static int bcm2079x_dev_open(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+
+ struct bcm2079x_dev *bcm2079x_dev = container_of(filp->private_data,
+ struct bcm2079x_dev,
+ bcm2079x_device);
+ filp->private_data = bcm2079x_dev;
+ bcm2079x_init_stat(bcm2079x_dev);
+ bcm2079x_enable_irq(bcm2079x_dev);
+ dev_info(&bcm2079x_dev->client->dev,
+ "device node major=%d, minor=%d\n", imajor(inode), iminor(inode));
+
+ return ret;
+}
+
+static long bcm2079x_dev_unlocked_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct bcm2079x_dev *bcm2079x_dev = filp->private_data;
+
+ switch (cmd) {
+ case BCMNFC_READ_FULL_PACKET:
+ break;
+ case BCMNFC_READ_MULTI_PACKETS:
+ break;
+ case BCMNFC_CHANGE_ADDR:
+ dev_info(&bcm2079x_dev->client->dev,
+ "%s, BCMNFC_CHANGE_ADDR (%x, %lx):\n", __func__, cmd,
+ arg);
+ change_client_addr(bcm2079x_dev, arg);
+ break;
+ case BCMNFC_POWER_CTL:
+ dev_info(&bcm2079x_dev->client->dev,
+ "%s, BCMNFC_POWER_CTL (%x, %lx):\n", __func__, cmd,
+ arg);
+ if (arg != 1)
+ set_client_addr(bcm2079x_dev, bcm2079x_dev->original_address);
+ //gpio_set_value(bcm2079x_dev->en_gpio, arg);
+ //Platform specific API
+ __gpio_set_value(bcm2079x_dev->en_gpio, arg);
+ enable = arg;
+ break;
+ case BCMNFC_WAKE_CTL:
+ dev_info(&bcm2079x_dev->client->dev,
+ "%s, BCMNFC_WAKE_CTL (%x, %lx):\n", __func__, cmd,
+ arg);
+#ifdef USE_WAKE_LOCK
+ if(arg != 0)
+ {
+ while(wake_lock_active(&bcm2079x_dev->wake_lock ))
+ {
+ printk("release wake lock!!!\n");
+ wake_unlock(&bcm2079x_dev->wake_lock);
+ }
+ //wake_lock_timeout(&bcm2079x_dev->wake_lock, HZ*2);
+ }
+#endif
+ //gpio_set_value(bcm2079x_dev->wake_gpio, arg);
+ //Platform specific API
+ __gpio_set_value(bcm2079x_dev->wake_gpio, arg);
+ break;
+ default:
+ dev_err(&bcm2079x_dev->client->dev,
+ "%s, unknown cmd (%x, %lx)\n", __func__, cmd, arg);
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct file_operations bcm2079x_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .poll = bcm2079x_dev_poll,
+ .read = bcm2079x_dev_read,
+ .write = bcm2079x_dev_write,
+ .open = bcm2079x_dev_open,
+ .unlocked_ioctl = bcm2079x_dev_unlocked_ioctl
+};
+
+unsigned int bcm2079x_irq_handle(void *para)
+{
+ bcm2079x_dev_irq_handler(0, para);
+ return 0;
+}
+
+static int bcm2079x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bcm2079x_platform_data *platform_data;
+ struct bcm2079x_dev *bcm2079x_dev;
+
+ platform_data = client->dev.platform_data;
+ /*Keep this DRIVER information*/
+ dev_info(&client->dev, "Driver Version: %s\n", VERSION);
+
+ dev_info(&client->dev, "%s, probing bcm2079x driver flags = %x\n", __func__, client->flags);
+ if (platform_data == NULL) {
+ dev_err(&client->dev, "nfc probe fail\n");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ ret = gpio_request(platform_data->irq_gpio, "nfc_int");
+ if (ret)
+ //dev_err(&client->dev, "fail to request nfc_int\n");
+ return -ENODEV;
+
+ ret = gpio_request(platform_data->en_gpio, "nfc_ven");
+ if (ret)
+ //dev_err(&client->dev, "fail to request nfc_ven\n");
+ goto err_en;
+
+ ret = gpio_request(platform_data->wake_gpio, "nfc_firm");
+ if (ret)
+ //dev_err(&client->dev, "fail to request nfc_firm\n");
+ goto err_firm;
+
+ gpio_direction_output(platform_data->en_gpio, 0 );
+ gpio_direction_output(platform_data->wake_gpio, 0);
+ //gpio_set_value(platform_data->en_gpio, 0);
+ //gpio_set_value(platform_data->wake_gpio, 0);
+
+ //Platform specific API
+ __gpio_set_value(platform_data->en_gpio, 0);
+ __gpio_set_value(platform_data->wake_gpio, 0);
+
+ gpio_direction_input(platform_data->irq_gpio );
+ wmt_gpio_setpull(platform_data->irq_gpio, WMT_GPIO_PULL_DOWN);
+
+ bcm2079x_dev = kzalloc(sizeof(*bcm2079x_dev), GFP_KERNEL);
+ if (bcm2079x_dev == NULL) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ //Platform specific API
+ client->irq = platform_data->irq_no;//__gpio_to_irq(platform_data->irq_gpio);
+ if (client->irq == -ENXIO) {
+ dev_err(&client->dev,"[bcm2079x]: get gpio irq fail.\n");
+ goto err_exit;
+ }
+
+ bcm2079x_dev->wake_gpio = platform_data->wake_gpio;
+ bcm2079x_dev->irq_gpio = platform_data->irq_gpio;
+ bcm2079x_dev->irq_no = platform_data->irq_no;
+ bcm2079x_dev->en_gpio = platform_data->en_gpio;
+ bcm2079x_dev->client = client;
+
+ /* init mutex and queues */
+ init_waitqueue_head(&bcm2079x_dev->read_wq);
+ mutex_init(&bcm2079x_dev->read_mutex);
+ spin_lock_init(&bcm2079x_dev->irq_enabled_lock);
+
+ bcm2079x_dev->bcm2079x_device.minor = MISC_DYNAMIC_MINOR;
+ bcm2079x_dev->bcm2079x_device.name = "bcm2079x";
+ bcm2079x_dev->bcm2079x_device.fops = &bcm2079x_dev_fops;
+
+ ret = misc_register(&bcm2079x_dev->bcm2079x_device);
+ if (ret) {
+ dev_err(&client->dev, "misc_register failed\n");
+ goto err_misc_register;
+ }
+
+ dev_info(&client->dev,
+ "%s, saving address %d\n",
+ __func__, client->addr);
+ bcm2079x_dev->original_address = client->addr;
+
+ /* request irq. the irq is set whenever the chip has data available
+ * for reading. it is cleared when all data has been read.
+ */
+ dev_info(&client->dev, "requesting IRQ %d with IRQF_NO_SUSPEND\n", client->irq);
+ bcm2079x_dev->irq_enabled = true;
+ /*
+ ret = request_irq(client->irq, bcm2079x_dev_irq_handler,
+ IRQF_TRIGGER_RISING|IRQF_NO_SUSPEND, client->name, bcm2079x_dev);
+ if (ret) {
+ dev_err(&client->dev, "request_irq failed\n");
+ goto err_request_irq_failed;
+ }
+ enable_irq_wake(client->irq);
+
+ */
+ //Platform specific API
+ nfc_handle = request_irq(bcm2079x_dev->irq_no, bcm2079x_dev_irq_handler,IRQF_NO_SUSPEND|IRQF_SHARED,BCM2079X_NAME,
+ bcm2079x_dev);
+
+ if (nfc_handle) {
+ dev_err(&client->dev, "request_irq failed.\n");
+ goto err_request_irq_failed;
+ }
+
+ bcm2079x_disable_irq(bcm2079x_dev);
+ i2c_set_clientdata(client, bcm2079x_dev);
+ dev_info(&client->dev,
+ "%s, probing bcm2079x driver exited successfully\n",
+ __func__);
+
+#ifdef USE_WAKE_LOCK
+ wake_lock_init(&bcm2079x_dev->wake_lock , WAKE_LOCK_SUSPEND, "nfcwakelock" );
+#endif
+ return 0;
+
+err_request_irq_failed:
+ misc_deregister(&bcm2079x_dev->bcm2079x_device);
+err_misc_register:
+ mutex_destroy(&bcm2079x_dev->read_mutex);
+ kfree(bcm2079x_dev);
+err_exit:
+ gpio_free(platform_data->wake_gpio);
+err_firm:
+ gpio_free(platform_data->en_gpio);
+err_en:
+ gpio_free(platform_data->irq_gpio);
+ return ret;
+}
+
+static int bcm2079x_remove(struct i2c_client *client)
+{
+ struct bcm2079x_dev *bcm2079x_dev;
+
+ bcm2079x_dev = i2c_get_clientdata(client);
+ //free_irq(client->irq, bcm2079x_dev);
+ //Platform specific API
+ if (!nfc_handle)
+ free_irq(bcm2079x_dev->irq_no,bcm2079x_dev);
+
+ misc_deregister(&bcm2079x_dev->bcm2079x_device);
+ mutex_destroy(&bcm2079x_dev->read_mutex);
+ gpio_free(bcm2079x_dev->irq_gpio);
+ gpio_free(bcm2079x_dev->en_gpio);
+ gpio_free(bcm2079x_dev->wake_gpio);
+ kfree(bcm2079x_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id bcm2079x_id[] = {
+ {"bcm2079x-i2c", 0},
+ {}
+};
+
+/*******************************************************************/
+/* AP6493 */
+/*******************************************************************/
+static const unsigned short normal_i2c[] = {0x77, I2C_CLIENT_END};
+/*******************************************************************/
+/* AP6441 */
+/*******************************************************************/
+//static const unsigned short normal_i2c[] = {0x76, I2C_CLIENT_END};
+
+static struct i2c_driver bcm2079x_driver = {
+ .class = I2C_CLASS_HWMON,
+ .id_table = bcm2079x_id,
+ .probe = bcm2079x_probe,
+ .remove = bcm2079x_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bcm2079x-i2c",
+ },
+ .address_list = normal_i2c,
+};
+
+/*
+ * module load/unload record keeping
+ */
+ struct i2c_board_info wmt_bcm2079x_bi = {
+ .type = BCM2079X_NAME,
+ .flags = 0x00,
+ .addr = CLIENT_ADDR,
+ .platform_data = &bcm2079x_pdata,
+ .archdata = NULL,
+ .irq = IRQ_PMC_WAKEUP,
+ };
+
+static int __init bcm2079x_dev_init(void)
+{
+ int r;
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(WMT_BCM2079X_I2C_CHANNEL);
+ if (adapter == NULL) {
+		printk("cannot get i2c adapter %d\n", WMT_BCM2079X_I2C_CHANNEL);
+ return -ENODEV;
+ }
+
+ bcm2079x_client = i2c_new_device(adapter, &wmt_bcm2079x_bi);
+ if ( bcm2079x_client == NULL) {
+		printk("failed to allocate i2c client\n");
+ return -ENOMEM;
+ }
+
+ i2c_put_adapter(adapter);
+
+ r = i2c_add_driver(&bcm2079x_driver);
+ if (r) {
+ pr_err(BCM2079X_NAME ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+ /*bcm2079x_driver.detect = bcm2079x_detect;
+
+ return i2c_add_driver(&bcm2079x_driver);*/
+}
+module_init(bcm2079x_dev_init);
+
+static void __exit bcm2079x_dev_exit(void)
+{
+ i2c_del_driver(&bcm2079x_driver);
+ i2c_unregister_device(bcm2079x_client);
+}
+module_exit(bcm2079x_dev_exit);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("NFC bcm2079x driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
new file mode 100644
index 00000000..3d56ae7e
--- /dev/null
+++ b/drivers/misc/bh1770glc.c
@@ -0,0 +1,1406 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/i2c/bh1770glc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */
+#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */
+#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */
+#define BH1770_I_LED3 0x83 /* LED3 current setting */
+#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */
+#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
+#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
+#define BH1770_PART_ID 0x8a /* Part number and revision ID */
+#define BH1770_MANUFACT_ID	0x8b /* Manufacturer ID */
+#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */
+#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */
+#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */
+#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */
+#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */
+#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */
+#define BH1770_INTERRUPT 0x92 /* Interrupt setting */
+#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
+#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
+#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
+#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
+#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
+#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
+#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
+
+/* MANUFACT_ID */
+#define BH1770_MANUFACT_ROHM 0x01
+#define BH1770_MANUFACT_OSRAM 0x03
+
+/* PART_ID */
+#define BH1770_PART 0x90
+#define BH1770_PART_MASK 0xf0
+#define BH1770_REV_MASK 0x0f
+#define BH1770_REV_SHIFT 0
+#define BH1770_REV_0 0x00
+#define BH1770_REV_1 0x01
+
+/* Operating modes for both */
+#define BH1770_STANDBY 0x00
+#define BH1770_FORCED 0x02
+#define BH1770_STANDALONE 0x03
+#define BH1770_SWRESET (0x01 << 2)
+
+#define BH1770_PS_TRIG_MEAS (1 << 0)
+#define BH1770_ALS_TRIG_MEAS (1 << 1)
+
+/* Interrupt control */
+#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
+#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */
+#define BH1770_INT_ALS_ENA (1 << 1)
+#define BH1770_INT_PS_ENA (1 << 0)
+
+/* Interrupt status */
+#define BH1770_INT_LED1_DATA (1 << 0)
+#define BH1770_INT_LED1_INT (1 << 1)
+#define BH1770_INT_LED2_DATA (1 << 2)
+#define BH1770_INT_LED2_INT (1 << 3)
+#define BH1770_INT_LED3_DATA (1 << 4)
+#define BH1770_INT_LED3_INT (1 << 5)
+#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
+#define BH1770_INT_ALS_DATA (1 << 6)
+#define BH1770_INT_ALS_INT (1 << 7)
+
+/* Led channels */
+#define BH1770_LED1 0x00
+
+#define BH1770_DISABLE 0
+#define BH1770_ENABLE 1
+#define BH1770_PROX_CHANNELS 1
+
+#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */
+#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
+#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
+#define BH1770_STARTUP_DELAY 50
+#define BH1770_RESET_TIME 10
+#define BH1770_TIMEOUT 2100 /* Timeout in 2.1 seconds */
+
+#define BH1770_LUX_RANGE 65535
+#define BH1770_PROX_RANGE 255
+#define BH1770_COEF_SCALER 1024
+#define BH1770_CALIB_SCALER 8192
+#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
+#define BH1770_LUX_DEF_THRES 1000
+#define BH1770_PROX_DEF_THRES 70
+#define BH1770_PROX_DEF_ABS_THRES 100
+#define BH1770_DEFAULT_PERSISTENCE 10
+#define BH1770_PROX_MAX_PERSISTENCE 50
+#define BH1770_LUX_GA_SCALE 16384
+#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */
+#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE
+#define BH1770_LUX_CORR_SCALE 4096
+
+#define PROX_ABOVE_THRESHOLD 1
+#define PROX_BELOW_THRESHOLD 0
+
+#define PROX_IGNORE_LUX_LIMIT 500
+
+struct bh1770_chip {
+ struct bh1770_platform_data *pdata;
+ char chipname[10];
+ u8 revision;
+ struct i2c_client *client;
+ struct regulator_bulk_data regs[2];
+ struct mutex mutex; /* avoid parallel access */
+ wait_queue_head_t wait;
+
+ bool int_mode_prox;
+ bool int_mode_lux;
+ struct delayed_work prox_work;
+ u32 lux_cf; /* Chip specific factor */
+ u32 lux_ga;
+ u32 lux_calib;
+ int lux_rate_index;
+ u32 lux_corr;
+ u16 lux_data_raw;
+ u16 lux_threshold_hi;
+ u16 lux_threshold_lo;
+ u16 lux_thres_hi_onchip;
+ u16 lux_thres_lo_onchip;
+ bool lux_wait_result;
+
+ int prox_enable_count;
+ u16 prox_coef;
+ u16 prox_const;
+ int prox_rate;
+ int prox_rate_threshold;
+ u8 prox_persistence;
+ u8 prox_persistence_counter;
+ u8 prox_data;
+ u8 prox_threshold;
+ u8 prox_threshold_hw;
+ bool prox_force_update;
+ u8 prox_abs_thres;
+ u8 prox_led;
+};
+
+static const char reg_vcc[] = "Vcc";
+static const char reg_vleds[] = "Vleds";
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
+ */
+static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
+static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
+
+/* Supported IR-led currents in mA */
+static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {100, 200, 500, 1000, 2000};
+ */
+static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
+
+/*
+ * Interrupt control functions are called with chip->mutex held,
+ * except during module probe / remove.
+ */
+static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
+ int lux)
+{
+ chip->int_mode_lux = lux;
+ /* Set interrupt modes, interrupt active low, latched */
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT,
+ (lux << 1) | chip->int_mode_prox);
+}
+
+static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
+ int ps)
+{
+ chip->int_mode_prox = ps;
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT,
+ (chip->int_mode_lux << 1) | (ps << 0));
+}
+
+/* chip->mutex is always kept here */
+static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
+{
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /* Proper proximity response needs fastest lux rate (100ms) */
+ if (chip->prox_enable_count)
+ rate_index = 0;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_MEAS_RATE,
+ rate_index);
+}
+
+static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
+{
+ int rate;
+
+ rate = (mode == PROX_ABOVE_THRESHOLD) ?
+ chip->prox_rate_threshold : chip->prox_rate;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_MEAS_RATE,
+ rate);
+}
+
+/* The infrared LED is controlled by the chip during proximity scanning */
+static inline int bh1770_led_cfg(struct bh1770_chip *chip)
+{
+ /* LED cfg, current for leds 1 and 2 */
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_I_LED,
+ (BH1770_LED1 << 6) |
+ (BH1770_LED_5mA << 3) |
+ chip->prox_led);
+}
+
+/*
+ * The following two functions convert raw ps values from HW to
+ * normalized values. The purpose is to compensate for differences
+ * between sensor versions and variants so that results mean roughly
+ * the same across versions.
+ */
+static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
+{
+ u16 adjusted;
+ adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
+ BH1770_COEF_SCALER);
+ if (adjusted > BH1770_PROX_RANGE)
+ adjusted = BH1770_PROX_RANGE;
+ return adjusted;
+}
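+
+/*
+ * Worked example of the adjustment above (illustration): with the
+ * OSRAM SFH7770 values set in bh1770_detect() below (prox_coef = 819,
+ * prox_const = 40), a raw reading of 100 becomes
+ * ((100 + 40) * 819) / 1024 = 111.
+ */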
+
+static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
+{
+ u16 raw;
+
+ raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
+ if (raw > chip->prox_const)
+ raw = raw - chip->prox_const;
+ else
+ raw = 0;
+ return raw;
+}
+
+/*
+ * The following two functions convert raw lux values from HW to
+ * normalized values. The purpose is to compensate for differences
+ * between sensor versions and variants so that results mean roughly
+ * the same across versions. Chip->mutex is kept when this is called.
+ */
+static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
+{
+ u8 tmp = 0;
+
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
+ chip->prox_threshold_hw = tmp;
+
+ return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
+ tmp);
+}
+
+static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
+{
+ u32 lux;
+ lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
+ return min(lux, (u32)BH1770_LUX_RANGE);
+}
+
+static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
+ u16 adjusted)
+{
+ return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
+}
+
+/* chip->mutex is kept when this is called */
+static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
+ u16 threshold_hi, u16 threshold_lo)
+{
+ u8 data[4];
+ int ret;
+
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /*
+ * Compensate threshold values with the correction factors if not
+ * set to minimum or maximum.
+	 * Min & max values disable interrupts.
+ */
+ if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
+ threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
+
+ if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
+ threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
+
+ if (chip->lux_thres_hi_onchip == threshold_hi &&
+ chip->lux_thres_lo_onchip == threshold_lo)
+ return 0;
+
+ chip->lux_thres_hi_onchip = threshold_hi;
+ chip->lux_thres_lo_onchip = threshold_lo;
+
+ data[0] = threshold_hi;
+ data[1] = threshold_hi >> 8;
+ data[2] = threshold_lo;
+ data[3] = threshold_lo >> 8;
+
+ ret = i2c_smbus_write_i2c_block_data(chip->client,
+ BH1770_ALS_TH_UP_0,
+ ARRAY_SIZE(data),
+ data);
+ return ret;
+}
+
+static int bh1770_lux_get_result(struct bh1770_chip *chip)
+{
+ u16 data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
+ if (ret < 0)
+ return ret;
+
+ data = ret & 0xff;
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
+ if (ret < 0)
+ return ret;
+
+ chip->lux_data_raw = data | ((ret & 0xff) << 8);
+
+ return 0;
+}
+
+/* Calculate correction value which contains chip and device specific parts */
+static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
+{
+ u32 tmp;
+ /* Impact of glass attenuation correction */
+ tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
+ /* Impact of chip factor correction */
+ tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
+ /* Impact of Device specific calibration correction */
+ tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
+ return tmp;
+}
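+
+/*
+ * Worked example of the correction chain above (illustration): with
+ * the neutral defaults (lux_ga = BH1770_LUX_GA_SCALE, lux_cf =
+ * BH1770_NEUTRAL_CF, lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE) every
+ * step multiplies by one, the result is BH1770_LUX_CORR_SCALE, and
+ * bh1770_lux_raw_to_adjusted() returns the raw value unchanged.
+ */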
+
+static int bh1770_lux_read_result(struct bh1770_chip *chip)
+{
+ bh1770_lux_get_result(chip);
+ return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
+}
+
+/*
+ * Chip on / off functions are called while keeping mutex except probe
+ * or remove phase
+ */
+static int bh1770_chip_on(struct bh1770_chip *chip)
+{
+ int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+
+ /* Reset the chip */
+ i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
+ BH1770_SWRESET);
+ usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
+
+	/*
+	 * ALS is always started since proximity needs ALS results
+	 * for reliability estimation.
+	 * Assume dark until the first ALS measurement is ready.
+	 */
+ chip->lux_data_raw = 0;
+ chip->prox_data = 0;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_CONTROL, BH1770_STANDALONE);
+
+ /* Assume reset defaults */
+ chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
+ chip->lux_thres_lo_onchip = 0;
+
+ return ret;
+}
+
+static void bh1770_chip_off(struct bh1770_chip *chip)
+{
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT, BH1770_DISABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_CONTROL, BH1770_STANDBY);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDBY);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+}
+
+/* chip->mutex is kept when this is called */
+static int bh1770_prox_mode_control(struct bh1770_chip *chip)
+{
+ if (chip->prox_enable_count) {
+ chip->prox_force_update = true; /* Force immediate update */
+
+ bh1770_lux_rate(chip, chip->lux_rate_index);
+ bh1770_prox_set_threshold(chip);
+ bh1770_led_cfg(chip);
+ bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
+ bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDALONE);
+ } else {
+ chip->prox_data = 0;
+ bh1770_lux_rate(chip, chip->lux_rate_index);
+ bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDBY);
+ }
+ return 0;
+}
+
+/* chip->mutex is kept when this is called */
+static int bh1770_prox_read_result(struct bh1770_chip *chip)
+{
+ int ret;
+ bool above;
+ u8 mode;
+
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > chip->prox_threshold_hw)
+ above = true;
+ else
+ above = false;
+
+	/*
+	 * When the ALS level goes above the limit, the proximity result
+	 * may be a false positive, so ignore it. With real proximity
+	 * there is a shadow causing low ALS levels.
+	 */
+ if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
+ ret = 0;
+
+ chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
+
+ /* Strong proximity level or force mode requires immediate response */
+ if (chip->prox_data >= chip->prox_abs_thres ||
+ chip->prox_force_update)
+ chip->prox_persistence_counter = chip->prox_persistence;
+
+ chip->prox_force_update = false;
+
+	/* Persistence filtering to reduce false proximity events */
+ if (likely(above)) {
+ if (chip->prox_persistence_counter < chip->prox_persistence) {
+ chip->prox_persistence_counter++;
+ ret = -ENODATA;
+ } else {
+ mode = PROX_ABOVE_THRESHOLD;
+ ret = 0;
+ }
+ } else {
+ chip->prox_persistence_counter = 0;
+ mode = PROX_BELOW_THRESHOLD;
+ chip->prox_data = 0;
+ ret = 0;
+ }
+
+	/* Set the proximity detection rate based on the above/below state */
+ if (ret == 0) {
+ bh1770_prox_rate(chip, mode);
+ sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
+ }
+out:
+ return ret;
+}
+
+static int bh1770_detect(struct bh1770_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 manu, part;
+
+ ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
+ if (ret < 0)
+ goto error;
+ manu = (u8)ret;
+
+ ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
+ if (ret < 0)
+ goto error;
+ part = (u8)ret;
+
+ chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
+ chip->prox_coef = BH1770_COEF_SCALER;
+ chip->prox_const = 0;
+ chip->lux_cf = BH1770_NEUTRAL_CF;
+
+ if ((manu == BH1770_MANUFACT_ROHM) &&
+ ((part & BH1770_PART_MASK) == BH1770_PART)) {
+ snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
+ return 0;
+ }
+
+ if ((manu == BH1770_MANUFACT_OSRAM) &&
+ ((part & BH1770_PART_MASK) == BH1770_PART)) {
+ snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
+ /* Values selected by comparing different versions */
+ chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
+ chip->prox_const = 40;
+ return 0;
+ }
+
+ ret = -ENODEV;
+error:
+ dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
+
+ return ret;
+}
+
+/*
+ * This work is re-scheduled at every proximity interrupt.
+ * If this work gets to run, it means that there hasn't been any
+ * proximity interrupt in time, so the situation is handled as no-proximity.
+ * It would be nice to have a low-threshold interrupt, or an interrupt
+ * that fires when the measurement and the hi-threshold are both 0,
+ * but neither of those exists. This is a workaround for the missing
+ * HW feature.
+ */
+
+static void bh1770_prox_work(struct work_struct *work)
+{
+ struct bh1770_chip *chip =
+ container_of(work, struct bh1770_chip, prox_work.work);
+
+ mutex_lock(&chip->mutex);
+ bh1770_prox_read_result(chip);
+ mutex_unlock(&chip->mutex);
+}
+
+/* This is the threaded IRQ handler */
+static irqreturn_t bh1770_irq(int irq, void *data)
+{
+ struct bh1770_chip *chip = data;
+ int status;
+ int rate = 0;
+
+ mutex_lock(&chip->mutex);
+ status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
+
+ /* Acknowledge interrupt by reading this register */
+ i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
+
+ /*
+ * Check if there is fresh data available for als.
+ * If this is the very first data, update thresholds after that.
+ */
+ if (status & BH1770_INT_ALS_DATA) {
+ bh1770_lux_get_result(chip);
+ if (unlikely(chip->lux_wait_result)) {
+ chip->lux_wait_result = false;
+ wake_up(&chip->wait);
+ bh1770_lux_update_thresholds(chip,
+ chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+ }
+ }
+
+ /* Disable interrupt logic to guarantee acknowledgement */
+ i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+ (0 << 1) | (0 << 0));
+
+ if ((status & BH1770_INT_ALS_INT))
+ sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
+
+ if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
+ rate = prox_rates_ms[chip->prox_rate_threshold];
+ bh1770_prox_read_result(chip);
+ }
+
+ /* Re-enable interrupt logic */
+ i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+ (chip->int_mode_lux << 1) |
+ (chip->int_mode_prox << 0));
+ mutex_unlock(&chip->mutex);
+
+	/*
+	 * Can't cancel the work while holding the mutex, since the work
+	 * uses the same mutex.
+	 */
+ if (rate) {
+ /*
+ * Simulate missing no-proximity interrupt 50ms after the
+ * next expected interrupt time.
+ */
+ cancel_delayed_work_sync(&chip->prox_work);
+ schedule_delayed_work(&chip->prox_work,
+ msecs_to_jiffies(rate + 50));
+ }
+ return IRQ_HANDLED;
+}
+
+static ssize_t bh1770_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ ssize_t ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ if (value) {
+ pm_runtime_get_sync(dev);
+
+ ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto leave;
+ }
+
+ ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto leave;
+ }
+
+		/* This causes an interrupt after the next measurement cycle */
+ bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+ BH1770_LUX_DEF_THRES);
+ /* Inform that we are waiting for a result from ALS */
+ chip->lux_wait_result = true;
+ bh1770_prox_mode_control(chip);
+ } else if (!pm_runtime_suspended(dev)) {
+ pm_runtime_put(dev);
+ }
+ ret = count;
+leave:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t bh1770_lux_result_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO; /* Chip is not enabled at all */
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_result,
+ msecs_to_jiffies(BH1770_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static ssize_t bh1770_lux_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
+}
+
+static ssize_t bh1770_prox_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ /* Assume no proximity. Sensor will tell real state soon */
+ if (!chip->prox_enable_count)
+ chip->prox_data = 0;
+
+ if (value)
+ chip->prox_enable_count++;
+ else if (chip->prox_enable_count > 0)
+ chip->prox_enable_count--;
+ else
+ goto leave;
+
+ /* Run control only when chip is powered on */
+ if (!pm_runtime_suspended(dev))
+ bh1770_prox_mode_control(chip);
+leave:
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_prox_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t len;
+
+ mutex_lock(&chip->mutex);
+ len = sprintf(buf, "%d\n", chip->prox_enable_count);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static ssize_t bh1770_prox_result_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ mutex_lock(&chip->mutex);
+ if (chip->prox_enable_count && !pm_runtime_suspended(dev))
+ ret = sprintf(buf, "%d\n", chip->prox_data);
+ else
+ ret = -EIO;
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_prox_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
+}
+
+static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
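+	/* Overwrite the trailing space with a newline */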
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t bh1770_get_prox_rate_above(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
+}
+
+static ssize_t bh1770_get_prox_rate_below(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
+}
+
+static int bh1770_prox_rate_validate(int rate)
+{
+ int i;
+
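+	/*
+	 * prox_rates_hz[] lists the supported rates in descending order;
+	 * pick the first (fastest) one not above the requested rate, or
+	 * fall back to the slowest.
+	 */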
+ for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
+ if (rate >= prox_rates_hz[i])
+ break;
+ return i;
+}
+
+static ssize_t bh1770_set_prox_rate_above(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_set_prox_rate_below(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_rate = bh1770_prox_rate_validate(value);
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_get_prox_thres(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_threshold);
+}
+
+static ssize_t bh1770_set_prox_thres(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+ if (value > BH1770_PROX_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_threshold = value;
+ ret = bh1770_prox_set_threshold(chip);
+ mutex_unlock(&chip->mutex);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t bh1770_prox_persistence_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->prox_persistence);
+}
+
+static ssize_t bh1770_prox_persistence_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > BH1770_PROX_MAX_PERSISTENCE)
+ return -EINVAL;
+
+ chip->prox_persistence = value;
+
+ return len;
+}
+
+static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", chip->prox_abs_thres);
+}
+
+static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > BH1770_PROX_RANGE)
+ return -EINVAL;
+
+ chip->prox_abs_thres = value;
+
+ return len;
+}
+
+static ssize_t bh1770_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
+}
+
+static ssize_t bh1770_lux_calib_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
+}
+
+static ssize_t bh1770_lux_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t len;
+
+ mutex_lock(&chip->mutex);
+ len = sprintf(buf, "%u\n", chip->lux_calib);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static ssize_t bh1770_lux_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ u32 old_calib;
+ u32 new_corr;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ old_calib = chip->lux_calib;
+ chip->lux_calib = value;
+ new_corr = bh1770_get_corr_value(chip);
+ if (new_corr == 0) {
+ chip->lux_calib = old_calib;
+ mutex_unlock(&chip->mutex);
+ return -EINVAL;
+ }
+ chip->lux_corr = new_corr;
+ /* Refresh thresholds on HW after changing correction value */
+ bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+
+ mutex_unlock(&chip->mutex);
+
+ return len;
+}
+
+static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
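+	/* Overwrite the trailing space with a newline */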
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t bh1770_get_lux_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
+}
+
+static ssize_t bh1770_set_lux_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long rate_hz;
+ int ret, i;
+
+ if (strict_strtoul(buf, 0, &rate_hz))
+ return -EINVAL;
+
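+	/* Pick the fastest entry in (descending) lux_rates_hz[] not above the request */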
+ for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
+ if (rate_hz >= lux_rates_hz[i])
+ break;
+
+ mutex_lock(&chip->mutex);
+ chip->lux_rate_index = i;
+ ret = bh1770_lux_rate(chip, i);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_threshold_hi);
+}
+
+static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_threshold_lo);
+}
+
+static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
+ const char *buf)
+{
+ int ret = 0;
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ if (thresh > BH1770_LUX_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ *target = thresh;
+	/*
+	 * Don't update the values in HW if we are still waiting for the
+	 * first interrupt to come after the device was enabled.
+	 */
+ if (!chip->lux_wait_result)
+ ret = bh1770_lux_update_thresholds(chip,
+ chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
+ bh1770_prox_enable_store);
+static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
+ bh1770_prox_abs_thres_show,
+ bh1770_prox_abs_thres_store);
+static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_thres,
+ bh1770_set_prox_thres);
+static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
+static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
+ bh1770_prox_persistence_show,
+ bh1770_prox_persistence_store);
+static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_rate_above,
+ bh1770_set_prox_rate_above);
+static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_rate_below,
+ bh1770_set_prox_rate_below);
+static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
+ bh1770_lux_calib_store);
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+ bh1770_lux_calib_default_show,
+ NULL);
+static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
+ bh1770_set_lux_rate);
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ bh1770_get_lux_thresh_above,
+ bh1770_set_lux_thresh_above);
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ bh1770_get_lux_thresh_below,
+ bh1770_set_lux_thresh_below);
+static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
+ bh1770_power_state_store);
+
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_thresh_above_count.attr,
+ &dev_attr_prox0_rate_above.attr,
+ &dev_attr_prox0_rate_below.attr,
+ &dev_attr_prox0_rate_avail.attr,
+ &dev_attr_prox0_thresh_above0_value.attr,
+ &dev_attr_prox0_thresh_above1_value.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute_group bh1770_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
+static int __devinit bh1770_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bh1770_chip *chip;
+ int err;
+
+ chip = kzalloc(sizeof *chip, GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ mutex_init(&chip->mutex);
+ init_waitqueue_head(&chip->wait);
+ INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ err = -EINVAL;
+ goto fail1;
+ }
+
+ chip->pdata = client->dev.platform_data;
+ chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE;
+ chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE;
+ chip->lux_threshold_lo = BH1770_LUX_DEF_THRES;
+ chip->lux_threshold_hi = BH1770_LUX_DEF_THRES;
+
+ if (chip->pdata->glass_attenuation == 0)
+ chip->lux_ga = BH1770_NEUTRAL_GA;
+ else
+ chip->lux_ga = chip->pdata->glass_attenuation;
+
+ chip->prox_threshold = BH1770_PROX_DEF_THRES;
+ chip->prox_led = chip->pdata->led_def_curr;
+ chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES;
+ chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE;
+ chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
+ chip->prox_rate = BH1770_PROX_DEFAULT_RATE;
+ chip->prox_data = 0;
+
+ chip->regs[0].supply = reg_vcc;
+ chip->regs[1].supply = reg_vleds;
+
+ err = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot get regulators\n");
+ goto fail1;
+ }
+
+ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot enable regulators\n");
+ goto fail2;
+ }
+
+ usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+ err = bh1770_detect(chip);
+ if (err < 0)
+ goto fail3;
+
+ /* Start chip */
+ bh1770_chip_on(chip);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ chip->lux_corr = bh1770_get_corr_value(chip);
+ if (chip->lux_corr == 0) {
+ dev_err(&client->dev, "Improper correction values\n");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ if (chip->pdata->setup_resources) {
+ err = chip->pdata->setup_resources();
+ if (err) {
+ err = -EINVAL;
+ goto fail3;
+ }
+ }
+
+ err = sysfs_create_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+ if (err < 0) {
+ dev_err(&chip->client->dev, "Sysfs registration failed\n");
+ goto fail4;
+ }
+
+	/*
+	 * The chip needs a level-triggered interrupt to work. However,
+	 * level triggering doesn't always work correctly with power
+	 * management, so select both edge and level triggering.
+	 */
+ err = request_threaded_irq(client->irq, NULL,
+ bh1770_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_TRIGGER_LOW,
+ "bh1770", chip);
+ if (err) {
+ dev_err(&client->dev, "could not get IRQ %d\n",
+ client->irq);
+ goto fail5;
+ }
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return err;
+fail5:
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+fail4:
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+fail3:
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+ kfree(chip);
+ return err;
+}
+
+static int __devexit bh1770_remove(struct i2c_client *client)
+{
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ free_irq(client->irq, chip);
+
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+
+ cancel_delayed_work_sync(&chip->prox_work);
+
+ if (!pm_runtime_suspended(&client->dev))
+ bh1770_chip_off(chip);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1770_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_off(chip);
+
+ return 0;
+}
+
+static int bh1770_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+ int ret = 0;
+
+ bh1770_chip_on(chip);
+
+ if (!pm_runtime_suspended(dev)) {
+		/*
+		 * If we were enabled at suspend time, everything is
+		 * expected to work nicely and smoothly.
+		 */
+ ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+ ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+
+		/* This causes an interrupt after the next measurement cycle */
+ bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+ BH1770_LUX_DEF_THRES);
+ /* Inform that we are waiting for a result from ALS */
+ chip->lux_wait_result = true;
+ bh1770_prox_mode_control(chip);
+ }
+ return ret;
+}
+
+#else
+#define bh1770_suspend NULL
+#define bh1770_resume NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int bh1770_runtime_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_off(chip);
+
+ return 0;
+}
+
+static int bh1770_runtime_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_on(chip);
+
+ return 0;
+}
+#endif
+
+static const struct i2c_device_id bh1770_id[] = {
+ {"bh1770glc", 0 },
+ {"sfh7770", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bh1770_id);
+
+static const struct dev_pm_ops bh1770_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
+ SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
+};
+
+static struct i2c_driver bh1770_driver = {
+ .driver = {
+ .name = "bh1770glc",
+ .owner = THIS_MODULE,
+ .pm = &bh1770_pm_ops,
+ },
+ .probe = bh1770_probe,
+ .remove = __devexit_p(bh1770_remove),
+ .id_table = bh1770_id,
+};
+
+module_i2c_driver(bh1770_driver);
+
+MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
new file mode 100644
index 00000000..54f6f39f
--- /dev/null
+++ b/drivers/misc/bh1780gli.c
@@ -0,0 +1,263 @@
+/*
+ * bh1780gli.c
+ * ROHM Ambient Light Sensor Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+#define BH1780_REG_CONTROL 0x80
+#define BH1780_REG_PARTID 0x8A
+#define BH1780_REG_MANFID 0x8B
+#define BH1780_REG_DLOW 0x8C
+#define BH1780_REG_DHIGH 0x8D
+
+#define BH1780_REVMASK (0xf)
+#define BH1780_POWMASK (0x3)
+#define BH1780_POFF (0x0)
+#define BH1780_PON (0x3)
+
+/* power on settling time in ms */
+#define BH1780_PON_DELAY 2
+
+struct bh1780_data {
+ struct i2c_client *client;
+ int power_state;
+ /* lock for sysfs operations */
+ struct mutex lock;
+};
+
+static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
+{
+ int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_write_byte_data failed error %d Register (%s)\n",
+ ret, msg);
+ return ret;
+}
+
+static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
+{
+ int ret = i2c_smbus_read_byte_data(ddata->client, reg);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d Register (%s)\n",
+ ret, msg);
+ return ret;
+}
+
+static ssize_t bh1780_show_lux(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bh1780_data *ddata = platform_get_drvdata(pdev);
+ int lsb, msb;
+
+ lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
+ if (lsb < 0)
+ return lsb;
+
+ msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
+ if (msb < 0)
+ return msb;
+
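+	/* The lux reading is a 16-bit value split across DLOW (LSB) and DHIGH (MSB) */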
+ return sprintf(buf, "%d\n", (msb << 8) | lsb);
+}
+
+static ssize_t bh1780_show_power_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bh1780_data *ddata = platform_get_drvdata(pdev);
+ int state;
+
+ state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
+ if (state < 0)
+ return state;
+
+ return sprintf(buf, "%d\n", state & BH1780_POWMASK);
+}
+
+static ssize_t bh1780_store_power_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bh1780_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ if (val < BH1780_POFF || val > BH1780_PON)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ msleep(BH1780_PON_DELAY);
+ ddata->power_state = val;
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
+
+static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+ bh1780_show_power_state, bh1780_store_power_state);
+
+static struct attribute *bh1780_attributes[] = {
+ &dev_attr_power_state.attr,
+ &dev_attr_lux.attr,
+ NULL
+};
+
+static const struct attribute_group bh1780_attr_group = {
+ .attrs = bh1780_attributes,
+};
+
+static int __devinit bh1780_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bh1780_data *ddata = NULL;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
+ ret = -EIO;
+ goto err_op_failed;
+ }
+
+ ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
+ if (ddata == NULL) {
+ ret = -ENOMEM;
+ goto err_op_failed;
+ }
+
+ ddata->client = client;
+ i2c_set_clientdata(client, ddata);
+
+ ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
+ if (ret < 0)
+ goto err_op_failed;
+
+ dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
+ (ret & BH1780_REVMASK));
+
+ mutex_init(&ddata->lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
+ if (ret)
+ goto err_op_failed;
+
+ return 0;
+
+err_op_failed:
+ kfree(ddata);
+ return ret;
+}
+
+static int __devexit bh1780_remove(struct i2c_client *client)
+{
+ struct bh1780_data *ddata;
+
+ ddata = i2c_get_clientdata(client);
+ sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
+ kfree(ddata);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1780_suspend(struct device *dev)
+{
+ struct bh1780_data *ddata;
+ int state, ret;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ ddata = i2c_get_clientdata(client);
+ state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
+ if (state < 0)
+ return state;
+
+ ddata->power_state = state & BH1780_POWMASK;
+
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
+ "CONTROL");
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int bh1780_resume(struct device *dev)
+{
+ struct bh1780_data *ddata;
+ int state, ret;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ ddata = i2c_get_clientdata(client);
+ state = ddata->power_state;
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
+ "CONTROL");
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
+#define BH1780_PMOPS (&bh1780_pm)
+#else
+#define BH1780_PMOPS NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id bh1780_id[] = {
+ { "bh1780", 0 },
+ { },
+};
+
+static struct i2c_driver bh1780_driver = {
+ .probe = bh1780_probe,
+ .remove = bh1780_remove,
+ .id_table = bh1780_id,
+ .driver = {
+ .name = "bh1780",
+ .pm = BH1780_PMOPS,
+ },
+};
+
+module_i2c_driver(bh1780_driver);
+
+MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
new file mode 100644
index 00000000..76c30646
--- /dev/null
+++ b/drivers/misc/bmp085.c
@@ -0,0 +1,472 @@
+/* Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
+
+ This driver supports the bmp085 digital barometric pressure
+ and temperature sensor from Bosch Sensortec. The datasheet
+ is available from their website:
+ http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP085-DS000-05.pdf
+
+ A pressure measurement is issued by reading from pressure0_input.
+   The return value ranges from 30000 to 110000 pascal with a resolution
+   of 1 pascal (0.01 millibar), which enables measurements from 9000m above
+   to 500m below sea level.
+
+   The temperature can be read from temp0_input. Values range from
+   -400 to 850, representing the ambient temperature in degrees Celsius
+   multiplied by 10. The resolution is 0.1 degree Celsius.
+
+ Because ambient pressure is temperature dependent, a temperature
+ measurement will be executed automatically even if the user is reading
+ from pressure0_input. This happens if the last temperature measurement
+   has been executed more than one second ago.
+
+ To decrease RMS noise from pressure measurements, the bmp085 can
+ autonomously calculate the average of up to eight samples. This is
+ set up by writing to the oversampling sysfs file. Accepted values
+   are 0, 1, 2 and 3; 2^x, where x is the value written to this file,
+   specifies the number of samples used to calculate the ambient pressure.
+   RMS noise is specified as 6 pascal (without averaging) and decreases
+   to 3 pascal when using an oversampling setting of 3.
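+
+   For example (the exact sysfs path depends on the I2C bus number and
+   device address; bus 0 and the default address 0x77 are assumed here):
+     echo 2 > /sys/bus/i2c/devices/0-0077/oversampling
+     cat /sys/bus/i2c/devices/0-0077/pressure0_input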
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+
+#define BMP085_I2C_ADDRESS 0x77
+#define BMP085_CHIP_ID 0x55
+
+#define BMP085_CALIBRATION_DATA_START 0xAA
+#define BMP085_CALIBRATION_DATA_LENGTH 11 /* 16 bit values */
+#define BMP085_CHIP_ID_REG 0xD0
+#define BMP085_VERSION_REG 0xD1
+#define BMP085_CTRL_REG 0xF4
+#define BMP085_TEMP_MEASUREMENT 0x2E
+#define BMP085_PRESSURE_MEASUREMENT 0x34
+#define BMP085_CONVERSION_REGISTER_MSB 0xF6
+#define BMP085_CONVERSION_REGISTER_LSB 0xF7
+#define BMP085_CONVERSION_REGISTER_XLSB 0xF8
+#define BMP085_TEMP_CONVERSION_TIME 5
+
+#define BMP085_CLIENT_NAME "bmp085"
+
+
+static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS,
+ I2C_CLIENT_END };
+
+struct bmp085_calibration_data {
+ s16 AC1, AC2, AC3;
+ u16 AC4, AC5, AC6;
+ s16 B1, B2;
+ s16 MB, MC, MD;
+};
+
+
+/* Each client has this additional data */
+struct bmp085_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct bmp085_calibration_data calibration;
+ u32 raw_temperature;
+ u32 raw_pressure;
+ unsigned char oversampling_setting;
+ unsigned long last_temp_measurement;
+ s32 b6; /* calculated temperature correction coefficient */
+};
+
+
+static s32 bmp085_read_calibration_data(struct i2c_client *client)
+{
+ u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ struct bmp085_calibration_data *cali = &(data->calibration);
+ s32 status = i2c_smbus_read_i2c_block_data(client,
+ BMP085_CALIBRATION_DATA_START,
+ BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16),
+ (u8 *)tmp);
+ if (status < 0)
+ return status;
+
+ if (status != BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16))
+ return -EIO;
+
+ cali->AC1 = be16_to_cpu(tmp[0]);
+ cali->AC2 = be16_to_cpu(tmp[1]);
+ cali->AC3 = be16_to_cpu(tmp[2]);
+ cali->AC4 = be16_to_cpu(tmp[3]);
+ cali->AC5 = be16_to_cpu(tmp[4]);
+ cali->AC6 = be16_to_cpu(tmp[5]);
+ cali->B1 = be16_to_cpu(tmp[6]);
+ cali->B2 = be16_to_cpu(tmp[7]);
+ cali->MB = be16_to_cpu(tmp[8]);
+ cali->MC = be16_to_cpu(tmp[9]);
+ cali->MD = be16_to_cpu(tmp[10]);
+ return 0;
+}
+
+
+static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
+{
+ u16 tmp;
+ s32 status;
+
+ mutex_lock(&data->lock);
+ status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG,
+ BMP085_TEMP_MEASUREMENT);
+ if (status != 0) {
+ dev_err(&data->client->dev,
+ "Error while requesting temperature measurement.\n");
+ goto exit;
+ }
+ msleep(BMP085_TEMP_CONVERSION_TIME);
+
+ status = i2c_smbus_read_i2c_block_data(data->client,
+ BMP085_CONVERSION_REGISTER_MSB, sizeof(tmp), (u8 *)&tmp);
+ if (status < 0)
+ goto exit;
+ if (status != sizeof(tmp)) {
+ dev_err(&data->client->dev,
+ "Error while reading temperature measurement result\n");
+ status = -EIO;
+ goto exit;
+ }
+ data->raw_temperature = be16_to_cpu(tmp);
+ data->last_temp_measurement = jiffies;
+ status = 0; /* everything ok, return 0 */
+
+exit:
+ mutex_unlock(&data->lock);
+ return status;
+}
+
+static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
+{
+ u32 tmp = 0;
+ s32 status;
+
+ mutex_lock(&data->lock);
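+	/* The oversampling setting is encoded in bits 7:6 of the control command */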
+ status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG,
+ BMP085_PRESSURE_MEASUREMENT + (data->oversampling_setting<<6));
+ if (status != 0) {
+ dev_err(&data->client->dev,
+ "Error while requesting pressure measurement.\n");
+ goto exit;
+ }
+
+ /* wait for the end of conversion */
+ msleep(2+(3 << data->oversampling_setting));
+
+ /* copy data into a u32 (4 bytes), but skip the first byte. */
+ status = i2c_smbus_read_i2c_block_data(data->client,
+ BMP085_CONVERSION_REGISTER_MSB, 3, ((u8 *)&tmp)+1);
+ if (status < 0)
+ goto exit;
+ if (status != 3) {
+ dev_err(&data->client->dev,
+ "Error while reading pressure measurement results\n");
+ status = -EIO;
+ goto exit;
+ }
+	data->raw_pressure = be32_to_cpu(tmp);
+ data->raw_pressure >>= (8-data->oversampling_setting);
+ status = 0; /* everything ok, return 0 */
+
+exit:
+ mutex_unlock(&data->lock);
+ return status;
+}
+
+
+/*
+ * This function starts the temperature measurement and returns the value
+ * in tenths of a degree Celsius.
+ */
+static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
+{
+ struct bmp085_calibration_data *cali = &data->calibration;
+ long x1, x2;
+ int status;
+
+ status = bmp085_update_raw_temperature(data);
+ if (status != 0)
+ goto exit;
+
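+	/* Temperature compensation, as given in the BMP085 datasheet */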
+ x1 = ((data->raw_temperature - cali->AC6) * cali->AC5) >> 15;
+ x2 = (cali->MC << 11) / (x1 + cali->MD);
+ data->b6 = x1 + x2 - 4000;
+	/* if NULL, just update b6. Used for pressure-only measurements */
+ if (temperature != NULL)
+ *temperature = (x1+x2+8) >> 4;
+
+exit:
+ return status;
+}
+
+/*
+ * This function starts the pressure measurement and returns the value
+ * in pascal. Since the pressure depends on the ambient temperature,
+ * a temperature measurement is executed if the last known value is older
+ * than one second.
+ */
+static s32 bmp085_get_pressure(struct bmp085_data *data, int *pressure)
+{
+ struct bmp085_calibration_data *cali = &data->calibration;
+ s32 x1, x2, x3, b3;
+ u32 b4, b7;
+ s32 p;
+ int status;
+
+	/* At least every second, force an update of the ambient temperature */
+ if (data->last_temp_measurement == 0 ||
+ time_is_before_jiffies(data->last_temp_measurement + 1*HZ)) {
+ status = bmp085_get_temperature(data, NULL);
+ if (status != 0)
+ goto exit;
+ }
+
+ status = bmp085_update_raw_pressure(data);
+ if (status != 0)
+ goto exit;
+
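+	/* Pressure compensation, as given in the BMP085 datasheet */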
+ x1 = (data->b6 * data->b6) >> 12;
+ x1 *= cali->B2;
+ x1 >>= 11;
+
+ x2 = cali->AC2 * data->b6;
+ x2 >>= 11;
+
+ x3 = x1 + x2;
+
+ b3 = (((((s32)cali->AC1) * 4 + x3) << data->oversampling_setting) + 2);
+ b3 >>= 2;
+
+ x1 = (cali->AC3 * data->b6) >> 13;
+ x2 = (cali->B1 * ((data->b6 * data->b6) >> 12)) >> 16;
+ x3 = (x1 + x2 + 2) >> 2;
+ b4 = (cali->AC4 * (u32)(x3 + 32768)) >> 15;
+
+ b7 = ((u32)data->raw_pressure - b3) *
+ (50000 >> data->oversampling_setting);
+ p = ((b7 < 0x80000000) ? ((b7 << 1) / b4) : ((b7 / b4) * 2));
+
+ x1 = p >> 8;
+ x1 *= x1;
+ x1 = (x1 * 3038) >> 16;
+ x2 = (-7357 * p) >> 16;
+ p += (x1 + x2 + 3791) >> 4;
+
+ *pressure = p;
+
+exit:
+ return status;
+}
+
+/*
+ * This function sets the chip-internal oversampling. Valid values are 0..3.
+ * The chip will use 2^oversampling samples for internal averaging.
+ * This influences the measurement time and the accuracy; larger values
+ * increase both. The datasheet gives an overview of how measurement time,
+ * accuracy and noise correlate.
+ */
+static void bmp085_set_oversampling(struct bmp085_data *data,
+ unsigned char oversampling)
+{
+ if (oversampling > 3)
+ oversampling = 3;
+ data->oversampling_setting = oversampling;
+}
+
+/*
+ * Returns the currently selected oversampling. Range: 0..3
+ */
+static unsigned char bmp085_get_oversampling(struct bmp085_data *data)
+{
+ return data->oversampling_setting;
+}
+
+/* sysfs callbacks */
+static ssize_t set_oversampling(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ unsigned long oversampling;
+ int success = strict_strtoul(buf, 10, &oversampling);
+ if (success == 0) {
+ bmp085_set_oversampling(data, oversampling);
+ return count;
+ }
+ return success;
+}
+
+static ssize_t show_oversampling(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ return sprintf(buf, "%u\n", bmp085_get_oversampling(data));
+}
+static DEVICE_ATTR(oversampling, S_IWUSR | S_IRUGO,
+ show_oversampling, set_oversampling);
+
+
+static ssize_t show_temperature(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int temperature;
+ int status;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+
+ status = bmp085_get_temperature(data, &temperature);
+ if (status != 0)
+ return status;
+ else
+ return sprintf(buf, "%d\n", temperature);
+}
+static DEVICE_ATTR(temp0_input, S_IRUGO, show_temperature, NULL);
+
+
+static ssize_t show_pressure(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int pressure;
+ int status;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bmp085_data *data = i2c_get_clientdata(client);
+
+ status = bmp085_get_pressure(data, &pressure);
+ if (status != 0)
+ return status;
+ else
+ return sprintf(buf, "%d\n", pressure);
+}
+static DEVICE_ATTR(pressure0_input, S_IRUGO, show_pressure, NULL);
+
+
+static struct attribute *bmp085_attributes[] = {
+ &dev_attr_temp0_input.attr,
+ &dev_attr_pressure0_input.attr,
+ &dev_attr_oversampling.attr,
+ NULL
+};
+
+static const struct attribute_group bmp085_attr_group = {
+ .attrs = bmp085_attributes,
+};
+
+static int bmp085_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ if (client->addr != BMP085_I2C_ADDRESS)
+ return -ENODEV;
+
+ if (i2c_smbus_read_byte_data(client, BMP085_CHIP_ID_REG) != BMP085_CHIP_ID)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int bmp085_init_client(struct i2c_client *client)
+{
+ unsigned char version;
+ int status;
+ struct bmp085_data *data = i2c_get_clientdata(client);
+ data->client = client;
+ status = bmp085_read_calibration_data(client);
+ if (status != 0)
+ goto exit;
+ version = i2c_smbus_read_byte_data(client, BMP085_VERSION_REG);
+ data->last_temp_measurement = 0;
+ data->oversampling_setting = 3;
+ mutex_init(&data->lock);
+ dev_info(&data->client->dev, "BMP085 ver. %d.%d found.\n",
+ (version & 0x0F), (version & 0xF0) >> 4);
+exit:
+ return status;
+}
+
+static int __devinit bmp085_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bmp085_data *data;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct bmp085_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* default settings after POR */
+ data->oversampling_setting = 0x00;
+
+ i2c_set_clientdata(client, data);
+
+ /* Initialize the BMP085 chip */
+ err = bmp085_init_client(client);
+ if (err != 0)
+ goto exit_free;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &bmp085_attr_group);
+ if (err)
+ goto exit_free;
+
+ dev_info(&data->client->dev, "Successfully initialized bmp085!\n");
+ goto exit;
+
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit bmp085_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group);
+ kfree(i2c_get_clientdata(client));
+ return 0;
+}
+
+static const struct i2c_device_id bmp085_id[] = {
+ { "bmp085", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
+
+static struct i2c_driver bmp085_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bmp085"
+ },
+ .id_table = bmp085_id,
+ .probe = bmp085_probe,
+ .remove = __devexit_p(bmp085_remove),
+
+ .detect = bmp085_detect,
+ .address_list = normal_i2c
+};
+
+module_i2c_driver(bmp085_driver);
+
+MODULE_AUTHOR("Christoph Mair <christoph.mair@gmail.com");
+MODULE_DESCRIPTION("BMP085 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig
new file mode 100644
index 00000000..e46af9a5
--- /dev/null
+++ b/drivers/misc/c2port/Kconfig
@@ -0,0 +1,35 @@
+#
+# C2 port devices
+#
+
+menuconfig C2PORT
+ tristate "Silicon Labs C2 port support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+	default n
+ help
+	  This option enables support for the Silicon Labs C2 port, used to
+	  program Silicon Labs microcontroller chips (and other 8051-compatible
+	  ones).
+
+	  If your board has no such microcontrollers, you don't need this
+	  interface at all.
+
+ To compile this driver as a module, choose M here: the module will
+ be called c2port_core. Note that you also need a client module
+ usually called c2port-*.
+
+ If you are not sure, say N here.
+
+if C2PORT
+
+config C2PORT_DURAMAR_2150
+ tristate "C2 port support for Eurotech's Duramar 2150 (EXPERIMENTAL)"
+ depends on X86 && C2PORT
+	default n
+ help
+	  This option enables C2 support for the on-board microcontroller
+	  of Eurotech's Duramar 2150.
+
+ To compile this driver as a module, choose M here: the module will
+ be called c2port-duramar2150.
+
+endif # C2PORT
diff --git a/drivers/misc/c2port/Makefile b/drivers/misc/c2port/Makefile
new file mode 100644
index 00000000..3b2cf43d
--- /dev/null
+++ b/drivers/misc/c2port/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_C2PORT) += core.o
+
+obj-$(CONFIG_C2PORT_DURAMAR_2150) += c2port-duramar2150.o
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
new file mode 100644
index 00000000..5484301d
--- /dev/null
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -0,0 +1,159 @@
+/*
+ * Silicon Labs C2 port Linux support for Eurotech Duramar 2150
+ *
+ * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/c2port.h>
+
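+/* C2D and C2CK are bit masks for the data and clock lines; the same
+ * bit positions apply to both the data and the direction registers.
+ */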
+#define DATA_PORT 0x325
+#define DIR_PORT 0x326
+#define C2D (1 << 0)
+#define C2CK (1 << 1)
+
+static DEFINE_MUTEX(update_lock);
+
+/*
+ * C2 port operations
+ */
+
+static void duramar2150_c2port_access(struct c2port_device *dev, int status)
+{
+ u8 v;
+
+ mutex_lock(&update_lock);
+
+ v = inb(DIR_PORT);
+
+ /* 0 = input, 1 = output */
+ if (status)
+ outb(v | (C2D | C2CK), DIR_PORT);
+ else
+		/* When access is "off", it is important that both lines are
+		 * set as inputs or hi-impedance */
+ outb(v & ~(C2D | C2CK), DIR_PORT);
+
+ mutex_unlock(&update_lock);
+}
+
+static void duramar2150_c2port_c2d_dir(struct c2port_device *dev, int dir)
+{
+ u8 v;
+
+ mutex_lock(&update_lock);
+
+ v = inb(DIR_PORT);
+
+ if (dir)
+ outb(v & ~C2D, DIR_PORT);
+ else
+ outb(v | C2D, DIR_PORT);
+
+ mutex_unlock(&update_lock);
+}
+
+static int duramar2150_c2port_c2d_get(struct c2port_device *dev)
+{
+ return inb(DATA_PORT) & C2D;
+}
+
+static void duramar2150_c2port_c2d_set(struct c2port_device *dev, int status)
+{
+ u8 v;
+
+ mutex_lock(&update_lock);
+
+ v = inb(DATA_PORT);
+
+ if (status)
+ outb(v | C2D, DATA_PORT);
+ else
+ outb(v & ~C2D, DATA_PORT);
+
+ mutex_unlock(&update_lock);
+}
+
+static void duramar2150_c2port_c2ck_set(struct c2port_device *dev, int status)
+{
+ u8 v;
+
+ mutex_lock(&update_lock);
+
+ v = inb(DATA_PORT);
+
+ if (status)
+ outb(v | C2CK, DATA_PORT);
+ else
+ outb(v & ~C2CK, DATA_PORT);
+
+ mutex_unlock(&update_lock);
+}
+
+static struct c2port_ops duramar2150_c2port_ops = {
+ .block_size = 512, /* bytes */
+ .blocks_num = 30, /* total flash size: 15360 bytes */
+
+ .access = duramar2150_c2port_access,
+ .c2d_dir = duramar2150_c2port_c2d_dir,
+ .c2d_get = duramar2150_c2port_c2d_get,
+ .c2d_set = duramar2150_c2port_c2d_set,
+ .c2ck_set = duramar2150_c2port_c2ck_set,
+};
+
+static struct c2port_device *duramar2150_c2port_dev;
+
+/*
+ * Module stuff
+ */
+
+static int __init duramar2150_c2port_init(void)
+{
+ struct resource *res;
+ int ret = 0;
+
+	res = request_region(DATA_PORT, 2, "c2port");
+ if (!res)
+ return -EBUSY;
+
+ duramar2150_c2port_dev = c2port_device_register("uc",
+ &duramar2150_c2port_ops, NULL);
+ if (!duramar2150_c2port_dev) {
+ ret = -ENODEV;
+ goto free_region;
+ }
+
+ return 0;
+
+free_region:
+	release_region(DATA_PORT, 2);
+ return ret;
+}
+
+static void __exit duramar2150_c2port_exit(void)
+{
+ /* Setup the GPIOs as input by default (access = 0) */
+ duramar2150_c2port_access(duramar2150_c2port_dev, 0);
+
+ c2port_device_unregister(duramar2150_c2port_dev);
+
+	release_region(DATA_PORT, 2);
+}
+
+module_init(duramar2150_c2port_init);
+module_exit(duramar2150_c2port_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("Silicon Labs C2 port Linux support for Duramar 2150");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
new file mode 100644
index 00000000..f428d86b
--- /dev/null
+++ b/drivers/misc/c2port/core.c
@@ -0,0 +1,1006 @@
+/*
+ * Silicon Labs C2 port core Linux support
+ *
+ * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/c2port.h>
+
+#define DRIVER_NAME "c2port"
+#define DRIVER_VERSION "0.51.0"
+
+static DEFINE_SPINLOCK(c2port_idr_lock);
+static DEFINE_IDR(c2port_idr);
+
+/*
+ * Local variables
+ */
+
+static struct class *c2port_class;
+
+/*
+ * C2 registers & commands defines
+ */
+
+/* C2 registers */
+#define C2PORT_DEVICEID 0x00
+#define C2PORT_REVID 0x01
+#define C2PORT_FPCTL 0x02
+#define C2PORT_FPDAT 0xB4
+
+/* C2 interface commands */
+#define C2PORT_GET_VERSION 0x01
+#define C2PORT_DEVICE_ERASE 0x03
+#define C2PORT_BLOCK_READ 0x06
+#define C2PORT_BLOCK_WRITE 0x07
+#define C2PORT_PAGE_ERASE 0x08
+
+/* C2 status return codes */
+#define C2PORT_INVALID_COMMAND 0x00
+#define C2PORT_COMMAND_FAILED 0x02
+#define C2PORT_COMMAND_OK 0x0d
+
+/*
+ * C2 port low level signal managements
+ */
+
+static void c2port_reset(struct c2port_device *dev)
+{
+ struct c2port_ops *ops = dev->ops;
+
+	/* To reset the device we have to keep the clock line low for at least
+ * 20us.
+ */
+ local_irq_disable();
+ ops->c2ck_set(dev, 0);
+ udelay(25);
+ ops->c2ck_set(dev, 1);
+ local_irq_enable();
+
+ udelay(1);
+}
+
+static void c2port_strobe_ck(struct c2port_device *dev)
+{
+ struct c2port_ops *ops = dev->ops;
+
+	/* During the hi-low-hi transition we disable local IRQs to avoid
+	 * interruptions, since the C2 port specification says that it must
+	 * be shorter than 5us, otherwise the microcontroller may consider
+	 * it a reset signal!
+	 */
+ local_irq_disable();
+ ops->c2ck_set(dev, 0);
+ udelay(1);
+ ops->c2ck_set(dev, 1);
+ local_irq_enable();
+
+ udelay(1);
+}
+
+/*
+ * C2 port basic functions
+ */
+
+static void c2port_write_ar(struct c2port_device *dev, u8 addr)
+{
+ struct c2port_ops *ops = dev->ops;
+ int i;
+
+ /* START field */
+ c2port_strobe_ck(dev);
+
+ /* INS field (11b, LSB first) */
+ ops->c2d_dir(dev, 0);
+ ops->c2d_set(dev, 1);
+ c2port_strobe_ck(dev);
+ ops->c2d_set(dev, 1);
+ c2port_strobe_ck(dev);
+
+ /* ADDRESS field */
+ for (i = 0; i < 8; i++) {
+ ops->c2d_set(dev, addr & 0x01);
+ c2port_strobe_ck(dev);
+
+ addr >>= 1;
+ }
+
+ /* STOP field */
+ ops->c2d_dir(dev, 1);
+ c2port_strobe_ck(dev);
+}
+
+static int c2port_read_ar(struct c2port_device *dev, u8 *addr)
+{
+ struct c2port_ops *ops = dev->ops;
+ int i;
+
+ /* START field */
+ c2port_strobe_ck(dev);
+
+ /* INS field (10b, LSB first) */
+ ops->c2d_dir(dev, 0);
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+ ops->c2d_set(dev, 1);
+ c2port_strobe_ck(dev);
+
+ /* ADDRESS field */
+ ops->c2d_dir(dev, 1);
+ *addr = 0;
+ for (i = 0; i < 8; i++) {
+ *addr >>= 1; /* shift in 8-bit ADDRESS field LSB first */
+
+ c2port_strobe_ck(dev);
+ if (ops->c2d_get(dev))
+ *addr |= 0x80;
+ }
+
+ /* STOP field */
+ c2port_strobe_ck(dev);
+
+ return 0;
+}
+
+static int c2port_write_dr(struct c2port_device *dev, u8 data)
+{
+ struct c2port_ops *ops = dev->ops;
+ int timeout, i;
+
+ /* START field */
+ c2port_strobe_ck(dev);
+
+ /* INS field (01b, LSB first) */
+ ops->c2d_dir(dev, 0);
+ ops->c2d_set(dev, 1);
+ c2port_strobe_ck(dev);
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+
+ /* LENGTH field (00b, LSB first -> 1 byte) */
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+
+ /* DATA field */
+ for (i = 0; i < 8; i++) {
+ ops->c2d_set(dev, data & 0x01);
+ c2port_strobe_ck(dev);
+
+ data >>= 1;
+ }
+
+ /* WAIT field */
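+	/* Poll until the target drives C2D high to signal that it is ready */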
+ ops->c2d_dir(dev, 1);
+ timeout = 20;
+ do {
+ c2port_strobe_ck(dev);
+ if (ops->c2d_get(dev))
+ break;
+
+ udelay(1);
+ } while (--timeout > 0);
+ if (timeout == 0)
+ return -EIO;
+
+ /* STOP field */
+ c2port_strobe_ck(dev);
+
+ return 0;
+}
+
+static int c2port_read_dr(struct c2port_device *dev, u8 *data)
+{
+ struct c2port_ops *ops = dev->ops;
+ int timeout, i;
+
+ /* START field */
+ c2port_strobe_ck(dev);
+
+ /* INS field (00b, LSB first) */
+ ops->c2d_dir(dev, 0);
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+
+ /* LENGTH field (00b, LSB first -> 1 byte) */
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+ ops->c2d_set(dev, 0);
+ c2port_strobe_ck(dev);
+
+ /* WAIT field */
+ ops->c2d_dir(dev, 1);
+ timeout = 20;
+ do {
+ c2port_strobe_ck(dev);
+ if (ops->c2d_get(dev))
+ break;
+
+ udelay(1);
+ } while (--timeout > 0);
+ if (timeout == 0)
+ return -EIO;
+
+ /* DATA field */
+ *data = 0;
+ for (i = 0; i < 8; i++) {
+ *data >>= 1; /* shift in 8-bit DATA field LSB first */
+
+ c2port_strobe_ck(dev);
+ if (ops->c2d_get(dev))
+ *data |= 0x80;
+ }
+
+ /* STOP field */
+ c2port_strobe_ck(dev);
+
+ return 0;
+}
+
+static int c2port_poll_in_busy(struct c2port_device *dev)
+{
+ u8 addr;
+ int ret, timeout = 20;
+
+ do {
+		ret = c2port_read_ar(dev, &addr);
+ if (ret < 0)
+ return -EIO;
+
+ if (!(addr & 0x02))
+ break;
+
+ udelay(1);
+ } while (--timeout > 0);
+ if (timeout == 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int c2port_poll_out_ready(struct c2port_device *dev)
+{
+ u8 addr;
+	int ret, timeout = 10000; /* a flash erase needs a long time... */
+
+ do {
+		ret = c2port_read_ar(dev, &addr);
+ if (ret < 0)
+ return -EIO;
+
+ if (addr & 0x01)
+ break;
+
+ udelay(1);
+ } while (--timeout > 0);
+ if (timeout == 0)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * sysfs methods
+ */
+
+static ssize_t c2port_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", c2dev->name);
+}
+
+static ssize_t c2port_show_flash_blocks_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ struct c2port_ops *ops = c2dev->ops;
+
+ return sprintf(buf, "%d\n", ops->blocks_num);
+}
+
+static ssize_t c2port_show_flash_block_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ struct c2port_ops *ops = c2dev->ops;
+
+ return sprintf(buf, "%d\n", ops->block_size);
+}
+
+static ssize_t c2port_show_flash_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ struct c2port_ops *ops = c2dev->ops;
+
+ return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size);
+}
+
+static ssize_t c2port_show_access(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", c2dev->access);
+}
+
+static ssize_t c2port_store_access(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ struct c2port_ops *ops = c2dev->ops;
+ int status, ret;
+
+ ret = sscanf(buf, "%d", &status);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&c2dev->mutex);
+
+ c2dev->access = !!status;
+
+ /* If access is "on", the clock line must be HIGH _before_ it is
+ * switched to output, and the data line must be set as an input
+ * in any case */
+ if (c2dev->access)
+ ops->c2ck_set(c2dev, 1);
+ ops->access(c2dev, c2dev->access);
+ if (c2dev->access)
+ ops->c2d_dir(c2dev, 1);
+
+ mutex_unlock(&c2dev->mutex);
+
+ return count;
+}
+
+static ssize_t c2port_store_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+
+ /* Check the device access status */
+ if (!c2dev->access)
+ return -EBUSY;
+
+ mutex_lock(&c2dev->mutex);
+
+ c2port_reset(c2dev);
+ c2dev->flash_access = 0;
+
+ mutex_unlock(&c2dev->mutex);
+
+ return count;
+}
+
+static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf)
+{
+ u8 data;
+ int ret;
+
+ /* Select DEVICEID register for C2 data register accesses */
+ c2port_write_ar(dev, C2PORT_DEVICEID);
+
+ /* Read and return the device ID register */
+ ret = c2port_read_dr(dev, &data);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t c2port_show_dev_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Check the device access status */
+ if (!c2dev->access)
+ return -EBUSY;
+
+ mutex_lock(&c2dev->mutex);
+ ret = __c2port_show_dev_id(c2dev, buf);
+ mutex_unlock(&c2dev->mutex);
+
+ if (ret < 0)
+ dev_err(dev, "cannot read from %s\n", c2dev->name);
+
+ return ret;
+}
+
+static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf)
+{
+ u8 data;
+ int ret;
+
+ /* Select REVID register for C2 data register accesses */
+ c2port_write_ar(dev, C2PORT_REVID);
+
+ /* Read and return the revision ID register */
+ ret = c2port_read_dr(dev, &data);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t c2port_show_rev_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Check the device access status */
+ if (!c2dev->access)
+ return -EBUSY;
+
+ mutex_lock(&c2dev->mutex);
+ ret = __c2port_show_rev_id(c2dev, buf);
+ mutex_unlock(&c2dev->mutex);
+
+ if (ret < 0)
+ dev_err(c2dev->dev, "cannot read from %s\n", c2dev->name);
+
+ return ret;
+}
+
+static ssize_t c2port_show_flash_access(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", c2dev->flash_access);
+}
+
+static ssize_t __c2port_store_flash_access(struct c2port_device *dev,
+ int status)
+{
+ int ret;
+
+ /* Check the device access status */
+ if (!dev->access)
+ return -EBUSY;
+
+ dev->flash_access = !!status;
+
+ /* If flash_access is off we have nothing to do... */
+ if (dev->flash_access == 0)
+ return 0;
+
+ /* Target the C2 flash programming control register for C2 data
+ * register access */
+ c2port_write_ar(dev, C2PORT_FPCTL);
+
+ /* Write the first keycode to enable C2 Flash programming */
+ ret = c2port_write_dr(dev, 0x02);
+ if (ret < 0)
+ return ret;
+
+ /* Write the second keycode to enable C2 Flash programming */
+ ret = c2port_write_dr(dev, 0x01);
+ if (ret < 0)
+ return ret;
+
+ /* Delay for at least 20ms to ensure the target is ready for
+ * C2 flash programming */
+ mdelay(25);
+
+ return 0;
+}
+
+static ssize_t c2port_store_flash_access(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ int status;
+ ssize_t ret;
+
+ ret = sscanf(buf, "%d", &status);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&c2dev->mutex);
+ ret = __c2port_store_flash_access(c2dev, status);
+ mutex_unlock(&c2dev->mutex);
+
+ if (ret < 0) {
+ dev_err(c2dev->dev, "cannot enable %s flash programming\n",
+ c2dev->name);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t __c2port_write_flash_erase(struct c2port_device *dev)
+{
+ u8 status;
+ int ret;
+
+ /* Target the C2 flash programming data register for C2 data register
+ * access.
+ */
+ c2port_write_ar(dev, C2PORT_FPDAT);
+
+ /* Send device erase command */
+ c2port_write_dr(dev, C2PORT_DEVICE_ERASE);
+
+ /* Wait for input acknowledge */
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Should check status before starting FLASH access sequence */
+
+ /* Wait for status information */
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Read flash programming interface status */
+ ret = c2port_read_dr(dev, &status);
+ if (ret < 0)
+ return ret;
+ if (status != C2PORT_COMMAND_OK)
+ return -EBUSY;
+
+ /* Send a three-byte arming sequence to enable the device erase.
+ * If the sequence is not received correctly, the command will be
+ * ignored.
+ * Sequence is: 0xde, 0xad, 0xa5.
+ */
+ c2port_write_dr(dev, 0xde);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+ c2port_write_dr(dev, 0xad);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+ c2port_write_dr(dev, 0xa5);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static ssize_t c2port_store_flash_erase(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct c2port_device *c2dev = dev_get_drvdata(dev);
+ int ret;
+
+ /* Check the device and flash access status */
+ if (!c2dev->access || !c2dev->flash_access)
+ return -EBUSY;
+
+ mutex_lock(&c2dev->mutex);
+ ret = __c2port_write_flash_erase(c2dev);
+ mutex_unlock(&c2dev->mutex);
+
+ if (ret < 0) {
+ dev_err(c2dev->dev, "cannot erase %s flash\n", c2dev->name);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct c2port_ops *ops = dev->ops;
+ u8 status, nread = 128;
+ int i, ret;
+
+ /* Check for flash end */
+ if (offset >= ops->block_size * ops->blocks_num)
+ return 0;
+
+ if (ops->block_size * ops->blocks_num - offset < nread)
+ nread = ops->block_size * ops->blocks_num - offset;
+ if (count < nread)
+ nread = count;
+ if (nread == 0)
+ return nread;
+
+ /* Target the C2 flash programming data register for C2 data register
+ * access */
+ c2port_write_ar(dev, C2PORT_FPDAT);
+
+ /* Send flash block read command */
+ c2port_write_dr(dev, C2PORT_BLOCK_READ);
+
+ /* Wait for input acknowledge */
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Should check status before starting FLASH access sequence */
+
+ /* Wait for status information */
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Read flash programming interface status */
+ ret = c2port_read_dr(dev, &status);
+ if (ret < 0)
+ return ret;
+ if (status != C2PORT_COMMAND_OK)
+ return -EBUSY;
+
+ /* Send address high byte */
+ c2port_write_dr(dev, offset >> 8);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Send address low byte */
+ c2port_write_dr(dev, offset & 0x00ff);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Send address block size */
+ c2port_write_dr(dev, nread);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Should check status before reading FLASH block */
+
+ /* Wait for status information */
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Read flash programming interface status */
+ ret = c2port_read_dr(dev, &status);
+ if (ret < 0)
+ return ret;
+ if (status != C2PORT_COMMAND_OK)
+ return -EBUSY;
+
+ /* Read flash block */
+ for (i = 0; i < nread; i++) {
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = c2port_read_dr(dev, buffer+i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return nread;
+}
+
+static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct c2port_device *c2dev =
+ dev_get_drvdata(container_of(kobj,
+ struct device, kobj));
+ ssize_t ret;
+
+ /* Check the device and flash access status */
+ if (!c2dev->access || !c2dev->flash_access)
+ return -EBUSY;
+
+ mutex_lock(&c2dev->mutex);
+ ret = __c2port_read_flash_data(c2dev, buffer, offset, count);
+ mutex_unlock(&c2dev->mutex);
+
+ if (ret < 0)
+ dev_err(c2dev->dev, "cannot read %s flash\n", c2dev->name);
+
+ return ret;
+}
+
+static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct c2port_ops *ops = dev->ops;
+ u8 status, nwrite = 128;
+ int i, ret;
+
+ /* Check for flash end */
+ if (offset >= ops->block_size * ops->blocks_num)
+ return -EINVAL;
+
+ if (nwrite > count)
+ nwrite = count;
+ if (ops->block_size * ops->blocks_num - offset < nwrite)
+ nwrite = ops->block_size * ops->blocks_num - offset;
+
+ /* Target the C2 flash programming data register for C2 data register
+ * access */
+ c2port_write_ar(dev, C2PORT_FPDAT);
+
+ /* Send flash block write command */
+ c2port_write_dr(dev, C2PORT_BLOCK_WRITE);
+
+ /* Wait for input acknowledge */
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Should check status before starting FLASH access sequence */
+
+ /* Wait for status information */
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Read flash programming interface status */
+ ret = c2port_read_dr(dev, &status);
+ if (ret < 0)
+ return ret;
+ if (status != C2PORT_COMMAND_OK)
+ return -EBUSY;
+
+ /* Send address high byte */
+ c2port_write_dr(dev, offset >> 8);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Send address low byte */
+ c2port_write_dr(dev, offset & 0x00ff);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Send address block size */
+ c2port_write_dr(dev, nwrite);
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Should check status before writing FLASH block */
+
+ /* Wait for status information */
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Read flash programming interface status */
+ ret = c2port_read_dr(dev, &status);
+ if (ret < 0)
+ return ret;
+ if (status != C2PORT_COMMAND_OK)
+ return -EBUSY;
+
+ /* Write flash block */
+ for (i = 0; i < nwrite; i++) {
+ ret = c2port_write_dr(dev, *(buffer+i));
+ if (ret < 0)
+ return ret;
+
+ ret = c2port_poll_in_busy(dev);
+ if (ret < 0)
+ return ret;
+
+ }
+
+ /* Wait for last flash write to complete */
+ ret = c2port_poll_out_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ return nwrite;
+}
+
+static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct c2port_device *c2dev =
+ dev_get_drvdata(container_of(kobj,
+ struct device, kobj));
+ int ret;
+
+ /* Check the device access status */
+ if (!c2dev->access || !c2dev->flash_access)
+ return -EBUSY;
+
+ mutex_lock(&c2dev->mutex);
+ ret = __c2port_write_flash_data(c2dev, buffer, offset, count);
+ mutex_unlock(&c2dev->mutex);
+
+ if (ret < 0)
+ dev_err(c2dev->dev, "cannot write %s flash\n", c2dev->name);
+
+ return ret;
+}
+
+/*
+ * Class attributes
+ */
+
+static struct device_attribute c2port_attrs[] = {
+ __ATTR(name, 0444, c2port_show_name, NULL),
+ __ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL),
+ __ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL),
+ __ATTR(flash_size, 0444, c2port_show_flash_size, NULL),
+ __ATTR(access, 0644, c2port_show_access, c2port_store_access),
+ __ATTR(reset, 0200, NULL, c2port_store_reset),
+ __ATTR(dev_id, 0444, c2port_show_dev_id, NULL),
+ __ATTR(rev_id, 0444, c2port_show_rev_id, NULL),
+
+ __ATTR(flash_access, 0644, c2port_show_flash_access,
+ c2port_store_flash_access),
+ __ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase),
+ __ATTR_NULL,
+};
+
+static struct bin_attribute c2port_bin_attrs = {
+ .attr = {
+ .name = "flash_data",
+ .mode = 0644
+ },
+ .read = c2port_read_flash_data,
+ .write = c2port_write_flash_data,
+ /* .size is computed at run-time */
+};
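+
+/*
+ * Example userspace session (a sketch; it assumes the port registered as
+ * c2port0, so the attributes above appear under /sys/class/c2port/c2port0):
+ *
+ *	echo 1 > /sys/class/c2port/c2port0/access	# turn the C2 port on
+ *	cat /sys/class/c2port/c2port0/dev_id		# read the device ID
+ *	echo 1 > /sys/class/c2port/c2port0/flash_access	# enter flash mode
+ *	echo 1 > /sys/class/c2port/c2port0/flash_erase	# erase the flash
+ *	dd if=image.bin of=/sys/class/c2port/c2port0/flash_data
+ */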
+
+/*
+ * Exported functions
+ */
+
+struct c2port_device *c2port_device_register(char *name,
+ struct c2port_ops *ops, void *devdata)
+{
+ struct c2port_device *c2dev;
+ int id, ret;
+
+ if (unlikely(!ops) || unlikely(!ops->access) ||
+ unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) ||
+ unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
+ return ERR_PTR(-EINVAL);
+
+ c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
+ kmemcheck_annotate_bitfield(c2dev, flags);
+ if (unlikely(!c2dev))
+ return ERR_PTR(-ENOMEM);
+
+ ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto error_idr_get_new;
+ }
+
+ spin_lock_irq(&c2port_idr_lock);
+ ret = idr_get_new(&c2port_idr, c2dev, &id);
+ spin_unlock_irq(&c2port_idr_lock);
+
+ if (ret < 0)
+ goto error_idr_get_new;
+ c2dev->id = id;
+
+ c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
+ "c2port%d", id);
+ if (unlikely(IS_ERR(c2dev->dev))) {
+ ret = PTR_ERR(c2dev->dev);
+ goto error_device_create;
+ }
+ dev_set_drvdata(c2dev->dev, c2dev);
+
+ strncpy(c2dev->name, name, C2PORT_NAME_LEN);
+ c2dev->ops = ops;
+ mutex_init(&c2dev->mutex);
+
+ /* Create binary file */
+ c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
+ ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
+ if (unlikely(ret))
+ goto error_device_create_bin_file;
+
+ /* By default C2 port access is off */
+ c2dev->access = c2dev->flash_access = 0;
+ ops->access(c2dev, 0);
+
+ dev_info(c2dev->dev, "C2 port %s added\n", name);
+ dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes "
+ "(%d bytes total)\n",
+ name, ops->blocks_num, ops->block_size,
+ ops->blocks_num * ops->block_size);
+
+ return c2dev;
+
+error_device_create_bin_file:
+ device_destroy(c2port_class, 0);
+
+error_device_create:
+ spin_lock_irq(&c2port_idr_lock);
+ idr_remove(&c2port_idr, id);
+ spin_unlock_irq(&c2port_idr_lock);
+
+error_idr_get_new:
+ kfree(c2dev);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(c2port_device_register);
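+
+/*
+ * Registration sketch for a board driver (the my_* callbacks and the
+ * block geometry below are hypothetical; every op checked above must be
+ * provided, typically by toggling the board's C2D/C2CK GPIO lines):
+ *
+ *	static struct c2port_ops my_c2port_ops = {
+ *		.block_size	= 512,
+ *		.blocks_num	= 30,
+ *		.access		= my_access,
+ *		.c2d_dir	= my_c2d_dir,
+ *		.c2d_get	= my_c2d_get,
+ *		.c2d_set	= my_c2d_set,
+ *		.c2ck_set	= my_c2ck_set,
+ *	};
+ *
+ *	c2dev = c2port_device_register("uc", &my_c2port_ops, NULL);
+ *	if (IS_ERR(c2dev))
+ *		return PTR_ERR(c2dev);
+ */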
+
+void c2port_device_unregister(struct c2port_device *c2dev)
+{
+ if (!c2dev)
+ return;
+
+ dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
+
+ device_remove_bin_file(c2dev->dev, &c2port_bin_attrs);
+ spin_lock_irq(&c2port_idr_lock);
+ idr_remove(&c2port_idr, c2dev->id);
+ spin_unlock_irq(&c2port_idr_lock);
+
+ device_destroy(c2port_class, c2dev->id);
+
+ kfree(c2dev);
+}
+EXPORT_SYMBOL(c2port_device_unregister);
+
+/*
+ * Module stuff
+ */
+
+static int __init c2port_init(void)
+{
+ printk(KERN_INFO "Silicon Labs C2 port support v. " DRIVER_VERSION
+ " - (C) 2007 Rodolfo Giometti\n");
+
+ c2port_class = class_create(THIS_MODULE, "c2port");
+ if (IS_ERR(c2port_class)) {
+ printk(KERN_ERR "c2port: failed to allocate class\n");
+ return PTR_ERR(c2port_class);
+ }
+ c2port_class->dev_attrs = c2port_attrs;
+
+ return 0;
+}
+
+static void __exit c2port_exit(void)
+{
+ class_destroy(c2port_class);
+}
+
+module_init(c2port_init);
+module_exit(c2port_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("Silicon Labs C2 port support v. " DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
new file mode 100644
index 00000000..c90370ed
--- /dev/null
+++ b/drivers/misc/carma/Kconfig
@@ -0,0 +1,17 @@
+config CARMA_FPGA
+ tristate "CARMA DATA-FPGA Access Driver"
+ depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
+ select VIDEOBUF_DMA_SG
+ default n
+ help
+ Say Y here to include support for communicating with the data
+ processing FPGAs on the OVRO CARMA board.
+
+config CARMA_FPGA_PROGRAM
+ tristate "CARMA DATA-FPGA Programmer"
+ depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
+ select VIDEOBUF_DMA_SG
+ default n
+ help
+ Say Y here to include support for programming the data processing
+ FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
new file mode 100644
index 00000000..ff36ac2c
--- /dev/null
+++ b/drivers/misc/carma/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
+obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
new file mode 100644
index 00000000..a2d25e48
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -0,0 +1,1140 @@
+/*
+ * CARMA Board DATA-FPGA Programmer
+ *
+ * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include <media/videobuf-dma-sg.h>
+
+/* MPC8349EMDS specific get_immrbase() */
+#include <sysdev/fsl_soc.h>
+
+static const char drv_name[] = "carma-fpga-program";
+
+/*
+ * Firmware images are always this exact size
+ *
+ * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
+ * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
+ */
+#define FW_SIZE_EP2S90 12849552
+#define FW_SIZE_EP2S130 18662880
+
+struct fpga_dev {
+ struct miscdevice miscdev;
+
+ /* Reference count */
+ struct kref ref;
+
+ /* Device Registers */
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *immr;
+
+ /* Freescale DMA Device */
+ struct dma_chan *chan;
+
+ /* Interrupts */
+ int irq, status;
+ struct completion completion;
+
+ /* FPGA Bitfile */
+ struct mutex lock;
+
+ struct videobuf_dmabuf vb;
+ bool vb_allocated;
+
+ /* max size and written bytes */
+ size_t fw_size;
+ size_t bytes;
+};
+
+/*
+ * FPGA Bitfile Helpers
+ */
+
+/**
+ * fpga_drop_firmware_data() - drop the bitfile image from memory
+ * @priv: the driver's private data structure
+ *
+ * LOCKING: must hold priv->lock
+ */
+static void fpga_drop_firmware_data(struct fpga_dev *priv)
+{
+ videobuf_dma_free(&priv->vb);
+ priv->vb_allocated = false;
+ priv->bytes = 0;
+}
+
+/*
+ * Private Data Reference Count
+ */
+
+static void fpga_dev_remove(struct kref *ref)
+{
+ struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
+
+ /* free any firmware image that was not programmed */
+ fpga_drop_firmware_data(priv);
+
+ mutex_destroy(&priv->lock);
+ kfree(priv);
+}
+
+/*
+ * LED Trigger (could be a separate module)
+ */
+
+/*
+ * NOTE: this whole thing has the problem that whenever the LEDs are first
+ * set to use the fpga trigger, they could be in the wrong state
+ */
+
+DEFINE_LED_TRIGGER(ledtrig_fpga);
+
+static void ledtrig_fpga_programmed(bool enabled)
+{
+ if (enabled)
+ led_trigger_event(ledtrig_fpga, LED_FULL);
+ else
+ led_trigger_event(ledtrig_fpga, LED_OFF);
+}
+
+/*
+ * FPGA Register Helpers
+ */
+
+/* Register Definitions */
+#define FPGA_CONFIG_CONTROL 0x40
+#define FPGA_CONFIG_STATUS 0x44
+#define FPGA_CONFIG_FIFO_SIZE 0x48
+#define FPGA_CONFIG_FIFO_USED 0x4C
+#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
+#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
+
+#define FPGA_FIFO_ADDRESS 0x3000
+
+static int fpga_fifo_size(void __iomem *regs)
+{
+ return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
+}
+
+#define CFG_STATUS_ERR_MASK 0xfffe
+
+static int fpga_config_error(void __iomem *regs)
+{
+ return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
+}
+
+static int fpga_fifo_empty(void __iomem *regs)
+{
+ return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
+}
+
+static void fpga_fifo_write(void __iomem *regs, u32 val)
+{
+ iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
+}
+
+static void fpga_set_byte_count(void __iomem *regs, u32 count)
+{
+ iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
+}
+
+#define CFG_CTL_ENABLE (1 << 0)
+#define CFG_CTL_RESET (1 << 1)
+#define CFG_CTL_DMA (1 << 2)
+
+static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
+{
+ u32 val;
+
+ val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
+ iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
+}
+
+static void fpga_programmer_disable(struct fpga_dev *priv)
+{
+ iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
+}
+
+static void fpga_dump_registers(struct fpga_dev *priv)
+{
+ u32 control, status, size, used, total, curr;
+
+ /* good status: do nothing */
+ if (priv->status == 0)
+ return;
+
+ /* Dump all status registers */
+ control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
+ status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
+ size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
+ used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
+ total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
+ curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
+
+ dev_err(priv->dev, "Configuration failed, dumping status registers\n");
+ dev_err(priv->dev, "Control: 0x%.8x\n", control);
+ dev_err(priv->dev, "Status: 0x%.8x\n", status);
+ dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
+ dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
+ dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
+ dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
+}
+
+/*
+ * FPGA Power Supply Code
+ */
+
+#define CTL_PWR_CONTROL 0x2006
+#define CTL_PWR_STATUS 0x200A
+#define CTL_PWR_FAIL 0x200B
+
+#define PWR_CONTROL_ENABLE 0x01
+
+#define PWR_STATUS_ERROR_MASK 0x10
+#define PWR_STATUS_GOOD 0x0f
+
+/*
+ * Determine if the FPGA power is good for all supplies
+ */
+static bool fpga_power_good(struct fpga_dev *priv)
+{
+ u8 val;
+
+ val = ioread8(priv->regs + CTL_PWR_STATUS);
+ if (val & PWR_STATUS_ERROR_MASK)
+ return false;
+
+ return val == PWR_STATUS_GOOD;
+}
+
+/*
+ * Disable the FPGA power supplies
+ */
+static void fpga_disable_power_supplies(struct fpga_dev *priv)
+{
+ unsigned long start;
+ u8 val;
+
+ iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
+
+ /*
+ * Wait 500ms for the power rails to discharge
+ *
+ * Without this delay, the CTL-CPLD state machine can get into a
+ * state where it is waiting for the power-goods to assert, but they
+ * never do. This only happens when enabling and disabling the
+ * power sequencer very rapidly.
+ *
+ * The loop below will also wait for the power goods to de-assert,
+ * but testing has shown that they are always disabled by the time
+ * the sleep completes. However, omitting the sleep and only waiting
+ * for the power-goods to de-assert was not sufficient to ensure
+ * that the power sequencer would not wedge itself.
+ */
+ msleep(500);
+
+ start = jiffies;
+ while (time_before(jiffies, start + HZ)) {
+ val = ioread8(priv->regs + CTL_PWR_STATUS);
+ if (!(val & PWR_STATUS_GOOD))
+ break;
+
+ usleep_range(5000, 10000);
+ }
+
+ val = ioread8(priv->regs + CTL_PWR_STATUS);
+ if (val & PWR_STATUS_GOOD) {
+ dev_err(priv->dev, "power disable failed: "
+ "power goods: status 0x%.2x\n", val);
+ }
+
+ if (val & PWR_STATUS_ERROR_MASK) {
+ dev_err(priv->dev, "power disable failed: "
+ "alarm bit set: status 0x%.2x\n", val);
+ }
+}
+
+/**
+ * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
+ * @priv: the driver's private data structure
+ *
+ * Enable the DATA-FPGA power supplies, waiting up to 1 second for
+ * them to enable successfully.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int fpga_enable_power_supplies(struct fpga_dev *priv)
+{
+ unsigned long start = jiffies;
+
+ if (fpga_power_good(priv)) {
+ dev_dbg(priv->dev, "power was already good\n");
+ return 0;
+ }
+
+ iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
+ while (time_before(jiffies, start + HZ)) {
+ if (fpga_power_good(priv))
+ return 0;
+
+ usleep_range(5000, 10000);
+ }
+
+ return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * Determine if the FPGA power supplies are all enabled
+ */
+static bool fpga_power_enabled(struct fpga_dev *priv)
+{
+ u8 val;
+
+ val = ioread8(priv->regs + CTL_PWR_CONTROL);
+ if (val & PWR_CONTROL_ENABLE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Determine if the FPGA's are programmed and running correctly
+ */
+static bool fpga_running(struct fpga_dev *priv)
+{
+ if (!fpga_power_good(priv))
+ return false;
+
+ /* Check the config done bit */
+ return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
+}
+
+/*
+ * FPGA Programming Code
+ */
+
+/**
+ * fpga_program_block() - put a block of data into the programmer's FIFO
+ * @priv: the driver's private data structure
+ * @buf: the data to program
+ * @count: the length of data to program (must be a multiple of 4 bytes)
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
+{
+ u32 *data = buf;
+ int size = fpga_fifo_size(priv->regs);
+ int i, len;
+ unsigned long timeout;
+
+ /* enforce correct data length for the FIFO */
+ BUG_ON(count % 4 != 0);
+
+ while (count > 0) {
+
+ /* Get the size of the block to write (maximum is FIFO_SIZE) */
+ len = min_t(size_t, count, size);
+ timeout = jiffies + HZ / 4;
+
+ /* Write the block */
+ for (i = 0; i < len / 4; i++)
+ fpga_fifo_write(priv->regs, data[i]);
+
+ /* Update the amounts left */
+ count -= len;
+ data += len / 4;
+
+ /* Wait for the fifo to empty */
+ while (true) {
+
+ if (fpga_fifo_empty(priv->regs)) {
+ break;
+ } else {
+ dev_dbg(priv->dev, "Fifo not empty\n");
+ cpu_relax();
+ }
+
+ if (fpga_config_error(priv->regs)) {
+ dev_err(priv->dev, "Error detected\n");
+ return -EIO;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(priv->dev, "Fifo drain timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(5000, 10000);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * fpga_program_cpu() - program the DATA-FPGA's using the CPU
+ * @priv: the driver's private data structure
+ *
+ * This is useful when the DMA programming method fails. It is possible to
+ * wedge the Freescale DMA controller such that the DMA programming method
+ * always fails. This method has always succeeded.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static noinline int fpga_program_cpu(struct fpga_dev *priv)
+{
+ int ret;
+
+ /* Disable the programmer */
+ fpga_programmer_disable(priv);
+
+ /* Set the total byte count */
+ fpga_set_byte_count(priv->regs, priv->bytes);
+ dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
+
+ /* Enable the controller for programming */
+ fpga_programmer_enable(priv, false);
+ dev_dbg(priv->dev, "enabled the controller\n");
+
+ /* Write each chunk of the FPGA bitfile to FPGA programmer */
+ ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes);
+ if (ret)
+ goto out_disable_controller;
+
+ /* Wait for the interrupt handler to signal that programming finished */
+ ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+ if (!ret) {
+ dev_err(priv->dev, "Timed out waiting for completion\n");
+ ret = -ETIMEDOUT;
+ goto out_disable_controller;
+ }
+
+ /* Retrieve the status from the interrupt handler */
+ ret = priv->status;
+
+out_disable_controller:
+ fpga_programmer_disable(priv);
+ return ret;
+}
+
+#define FIFO_DMA_ADDRESS 0xf0003000
+#define FIFO_MAX_LEN 4096
+
+/**
+ * fpga_program_dma() - program the DATA-FPGA's using the DMA engine
+ * @priv: the driver's private data structure
+ *
+ * Program the DATA-FPGA's using the Freescale DMA engine. This requires that
+ * the engine is programmed such that the hardware DMA request lines can
+ * control the entire DMA transaction. The system controller FPGA then
+ * completely offloads the programming from the CPU.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static noinline int fpga_program_dma(struct fpga_dev *priv)
+{
+ struct videobuf_dmabuf *vb = &priv->vb;
+ struct dma_chan *chan = priv->chan;
+ struct dma_async_tx_descriptor *tx;
+ size_t num_pages, len, avail = 0;
+ struct dma_slave_config config;
+ struct scatterlist *sg;
+ struct sg_table table;
+ dma_cookie_t cookie;
+ int ret, i;
+
+ /* Disable the programmer */
+ fpga_programmer_disable(priv);
+
+ /* Allocate a scatterlist for the DMA destination */
+ num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
+ ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
+ if (ret) {
+ dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ /*
+ * This is an ugly hack
+ *
+ * We fill in a scatterlist as if it were mapped for DMA. This is
+ * necessary because there exists no better structure for this
+ * inside the kernel code.
+ *
+ * As an added bonus, we can use the DMAEngine API for all of this,
+ * rather than inventing another extremely similar API.
+ */
+ avail = priv->bytes;
+ for_each_sg(table.sgl, sg, num_pages, i) {
+ len = min_t(size_t, avail, FIFO_MAX_LEN);
+ sg_dma_address(sg) = FIFO_DMA_ADDRESS;
+ sg_dma_len(sg) = len;
+
+ avail -= len;
+ }
+
+ /* Map the buffer for DMA */
+ ret = videobuf_dma_map(priv->dev, &priv->vb);
+ if (ret) {
+ dev_err(priv->dev, "Unable to map buffer for DMA\n");
+ goto out_free_table;
+ }
+
+ /*
+ * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
+ * transaction, and then put it under external control
+ */
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
+ ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
+ (unsigned long)&config);
+ if (ret) {
+ dev_err(priv->dev, "DMA slave configuration failed\n");
+ goto out_dma_unmap;
+ }
+
+ ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1);
+ if (ret) {
+ dev_err(priv->dev, "DMA external control setup failed\n");
+ goto out_dma_unmap;
+ }
+
+ /* setup and submit the DMA transaction */
+ tx = chan->device->device_prep_dma_sg(chan,
+ table.sgl, num_pages,
+ vb->sglist, vb->sglen, 0);
+ if (!tx) {
+ dev_err(priv->dev, "Unable to prep DMA transaction\n");
+ ret = -ENOMEM;
+ goto out_dma_unmap;
+ }
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(priv->dev, "Unable to submit DMA transaction\n");
+ ret = -ENOMEM;
+ goto out_dma_unmap;
+ }
+
+ dma_async_memcpy_issue_pending(chan);
+
+ /* Set the total byte count */
+ fpga_set_byte_count(priv->regs, priv->bytes);
+ dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
+
+ /* Enable the controller for DMA programming */
+ fpga_programmer_enable(priv, true);
+ dev_dbg(priv->dev, "enabled the controller\n");
+
+ /* Wait for the interrupt handler to signal that programming finished */
+ ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+ if (!ret) {
+ dev_err(priv->dev, "Timed out waiting for completion\n");
+ ret = -ETIMEDOUT;
+ goto out_disable_controller;
+ }
+
+ /* Retrieve the status from the interrupt handler */
+ ret = priv->status;
+
+out_disable_controller:
+ fpga_programmer_disable(priv);
+out_dma_unmap:
+ videobuf_dma_unmap(priv->dev, vb);
+out_free_table:
+ sg_free_table(&table);
+out_return:
+ return ret;
+}
+
+/*
+ * Interrupt Handling
+ */
+
+static irqreturn_t fpga_irq(int irq, void *dev_id)
+{
+ struct fpga_dev *priv = dev_id;
+
+ /* Save the status */
+ priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
+ dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
+ fpga_dump_registers(priv);
+
+ /* Disabling the programmer clears the interrupt */
+ fpga_programmer_disable(priv);
+
+ /* Notify any waiters */
+ complete(&priv->completion);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * SYSFS Helpers
+ */
+
+/**
+ * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's
+ * @priv: the driver's private data structure
+ *
+ * LOCKING: must hold priv->lock
+ */
+static int fpga_do_stop(struct fpga_dev *priv)
+{
+ u32 val;
+
+ /* Set the led to unprogrammed */
+ ledtrig_fpga_programmed(false);
+
+ /* Pulse the config line to reset the FPGA's */
+ val = CFG_CTL_ENABLE | CFG_CTL_RESET;
+ iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
+ iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
+
+ return 0;
+}
+
+static noinline int fpga_do_program(struct fpga_dev *priv)
+{
+ int ret;
+
+ if (priv->bytes != priv->fw_size) {
+ dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
+ "should be %zu bytes\n",
+ priv->bytes, priv->fw_size);
+ return -EINVAL;
+ }
+
+ if (!fpga_power_enabled(priv)) {
+ dev_err(priv->dev, "Power not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!fpga_power_good(priv)) {
+ dev_err(priv->dev, "Power not good\n");
+ return -EINVAL;
+ }
+
+ /* Set the LED to unprogrammed */
+ ledtrig_fpga_programmed(false);
+
+ /* Try to program the FPGA's using DMA */
+ ret = fpga_program_dma(priv);
+
+ /* If DMA failed or is unavailable, fall back to CPU programming */
+ if (ret) {
+ dev_warn(priv->dev, "Falling back to CPU programming\n");
+ ret = fpga_program_cpu(priv);
+ }
+
+ if (ret) {
+ dev_err(priv->dev, "Unable to program FPGA's\n");
+ return ret;
+ }
+
+ /* Drop the firmware bitfile from memory */
+ fpga_drop_firmware_data(priv);
+
+ dev_dbg(priv->dev, "FPGA programming successful\n");
+ ledtrig_fpga_programmed(true);
+
+ return 0;
+}
+
+/*
+ * File Operations
+ */
+
+static int fpga_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The miscdevice layer puts our struct miscdevice into the
+ * filp->private_data field. We use this to find our private
+ * data and then overwrite it with our own private structure.
+ */
+ struct fpga_dev *priv = container_of(filp->private_data,
+ struct fpga_dev, miscdev);
+ unsigned int nr_pages;
+ int ret;
+
+ /* We only allow one process at a time */
+ ret = mutex_lock_interruptible(&priv->lock);
+ if (ret)
+ return ret;
+
+ filp->private_data = priv;
+ kref_get(&priv->ref);
+
+ /* Truncation: drop any existing data */
+ if (filp->f_flags & O_TRUNC)
+ priv->bytes = 0;
+
+ /* Check if we have already allocated a buffer */
+ if (priv->vb_allocated)
+ return 0;
+
+ /* Allocate a buffer to hold enough data for the bitfile */
+ nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
+ ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages);
+ if (ret) {
+ dev_err(priv->dev, "unable to allocate data buffer\n");
+ mutex_unlock(&priv->lock);
+ kref_put(&priv->ref, fpga_dev_remove);
+ return ret;
+ }
+
+ priv->vb_allocated = true;
+ return 0;
+}
+
+static int fpga_release(struct inode *inode, struct file *filp)
+{
+ struct fpga_dev *priv = filp->private_data;
+
+ mutex_unlock(&priv->lock);
+ kref_put(&priv->ref, fpga_dev_remove);
+ return 0;
+}
+
+static ssize_t fpga_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct fpga_dev *priv = filp->private_data;
+
+ /* FPGA bitfiles have an exact size: disallow anything else */
+ if (priv->bytes >= priv->fw_size)
+ return -ENOSPC;
+
+ count = min_t(size_t, priv->fw_size - priv->bytes, count);
+ if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count))
+ return -EFAULT;
+
+ priv->bytes += count;
+ return count;
+}
+
+static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct fpga_dev *priv = filp->private_data;
+
+ count = min_t(size_t, priv->bytes - *f_pos, count);
+ if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
+{
+ struct fpga_dev *priv = filp->private_data;
+ loff_t newpos;
+
+ /* only read-only opens are allowed to seek */
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ return -EINVAL;
+
+ switch (origin) {
+ case SEEK_SET: /* seek relative to the beginning of the file */
+ newpos = offset;
+ break;
+ case SEEK_CUR: /* seek relative to current position in the file */
+ newpos = filp->f_pos + offset;
+ break;
+ case SEEK_END: /* seek relative to the end of the file */
+ newpos = priv->fw_size - offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for sanity */
+ if (newpos > priv->fw_size)
+ return -EINVAL;
+
+ filp->f_pos = newpos;
+ return newpos;
+}
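+
+/*
+ * Note the nonstandard SEEK_END handling above: newpos is computed as
+ * fw_size - offset, so a positive offset seeks backwards from the end of
+ * the firmware image rather than past it.
+ */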
+
+static const struct file_operations fpga_fops = {
+ .open = fpga_open,
+ .release = fpga_release,
+ .write = fpga_write,
+ .read = fpga_read,
+ .llseek = fpga_llseek,
+};
+
+/*
+ * Device Attributes
+ */
+
+static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ u8 val;
+
+ val = ioread8(priv->regs + CTL_PWR_FAIL);
+ return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
+}
+
+static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
+}
+
+static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
+}
+
+static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val) {
+ ret = fpga_enable_power_supplies(priv);
+ if (ret)
+ return ret;
+ } else {
+ fpga_do_stop(priv);
+ fpga_disable_power_supplies(priv);
+ }
+
+ return count;
+}
+
+static ssize_t program_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
+}
+
+static ssize_t program_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /* We can't have an image writer and be programming simultaneously */
+ if (mutex_lock_interruptible(&priv->lock))
+ return -ERESTARTSYS;
+
+ /* Program or Reset the FPGA's */
+ ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
+ if (ret)
+ goto out_unlock;
+
+ /* Success */
+ ret = count;
+
+out_unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
+static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
+static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
+ penable_show, penable_store);
+
+static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
+ program_show, program_store);
+
+static struct attribute *fpga_attributes[] = {
+ &dev_attr_power_fail.attr,
+ &dev_attr_power_good.attr,
+ &dev_attr_power_enable.attr,
+ &dev_attr_program.attr,
+ NULL,
+};
+
+static const struct attribute_group fpga_attr_group = {
+ .attrs = fpga_attributes,
+};
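+
+/*
+ * Typical programming session from userspace (a sketch; the sysfs paths
+ * assume the default miscdevice naming under /sys/class/misc/, and
+ * bitfile.rbf stands in for a real firmware image):
+ *
+ *	echo 1 > /sys/class/misc/carma-fpga-program/power_enable
+ *	cat bitfile.rbf > /dev/carma-fpga-program
+ *	echo 1 > /sys/class/misc/carma-fpga-program/program
+ */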
+
+/*
+ * OpenFirmware Device Subsystem
+ */
+
+#define SYS_REG_VERSION 0x00
+#define SYS_REG_GEOGRAPHIC 0x10
+
+static bool dma_filter(struct dma_chan *chan, void *data)
+{
+ /*
+ * DMA Channel #0 is the only acceptable device
+ *
+ * This probably won't survive an unload/load cycle of the Freescale
+ * DMAEngine driver, but that won't be a problem
+ */
+ return chan->chan_id == 0 && chan->device->dev_id == 0;
+}
+
+static int fpga_of_remove(struct platform_device *op)
+{
+ struct fpga_dev *priv = dev_get_drvdata(&op->dev);
+ struct device *this_device = priv->miscdev.this_device;
+
+ sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
+ misc_deregister(&priv->miscdev);
+
+ free_irq(priv->irq, priv);
+ irq_dispose_mapping(priv->irq);
+
+ /* make sure the power supplies are off */
+ fpga_disable_power_supplies(priv);
+
+ /* unmap registers */
+ iounmap(priv->immr);
+ iounmap(priv->regs);
+
+ dma_release_channel(priv->chan);
+
+ /* drop our reference to the private data structure */
+ kref_put(&priv->ref, fpga_dev_remove);
+ return 0;
+}
+
+/* CTL-CPLD Version Register */
+#define CTL_CPLD_VERSION 0x2000
+
+static int fpga_of_probe(struct platform_device *op)
+{
+ struct device_node *of_node = op->dev.of_node;
+ struct device *this_device;
+ struct fpga_dev *priv;
+ dma_cap_mask_t mask;
+ u32 ver;
+ int ret;
+
+ /* Allocate private data */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&op->dev, "Unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ /* Setup the miscdevice */
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = drv_name;
+ priv->miscdev.fops = &fpga_fops;
+
+ kref_init(&priv->ref);
+
+ dev_set_drvdata(&op->dev, priv);
+ priv->dev = &op->dev;
+ mutex_init(&priv->lock);
+ init_completion(&priv->completion);
+ videobuf_dma_init(&priv->vb);
+
+ dev_set_drvdata(priv->dev, priv);
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(DMA_INTERRUPT, mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_SG, mask);
+
+ /* Get control of DMA channel #0 */
+ priv->chan = dma_request_channel(mask, dma_filter, NULL);
+ if (!priv->chan) {
+ dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
+ /* Remap the registers for use */
+ priv->regs = of_iomap(of_node, 0);
+ if (!priv->regs) {
+ dev_err(&op->dev, "Unable to ioremap registers\n");
+ ret = -ENOMEM;
+ goto out_dma_release_channel;
+ }
+
+ /* Remap the IMMR for use */
+ priv->immr = ioremap(get_immrbase(), 0x100000);
+ if (!priv->immr) {
+ dev_err(&op->dev, "Unable to ioremap IMMR\n");
+ ret = -ENOMEM;
+ goto out_unmap_regs;
+ }
+
+ /*
+ * Check that external DMA is configured
+ *
+ * U-Boot does this for us, but we should check it and bail out if
+ * there is a problem. Failing to have this register setup correctly
+ * will cause the DMA controller to transfer a single cacheline
+ * worth of data, then wedge itself.
+ */
+ if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
+ dev_err(&op->dev, "External DMA control not configured\n");
+ ret = -ENODEV;
+ goto out_unmap_immr;
+ }
+
+ /*
+ * Check the CTL-CPLD version
+ *
+ * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
+ * don't want to run on any version of the CTL-CPLD that does not use
+ * a compatible register layout.
+ *
+ * v2: changed register layout, added power sequencer
+ * v3: added glitch filter on the i2c overcurrent/overtemp outputs
+ */
+ ver = ioread8(priv->regs + CTL_CPLD_VERSION);
+ if (ver != 0x02 && ver != 0x03) {
+ dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
+ ret = -ENODEV;
+ goto out_unmap_immr;
+ }
+
+ /* Set the exact size that the firmware image should be */
+ ver = ioread32be(priv->regs + SYS_REG_VERSION);
+ priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;
+
+ /* Find the correct IRQ number */
+ priv->irq = irq_of_parse_and_map(of_node, 0);
+ if (priv->irq == NO_IRQ) {
+ dev_err(&op->dev, "Unable to find IRQ line\n");
+ ret = -ENODEV;
+ goto out_unmap_immr;
+ }
+
+ /* Request the IRQ */
+ ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
+ if (ret) {
+ dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
+ ret = -ENODEV;
+ goto out_irq_dispose_mapping;
+ }
+
+ /* Reset and stop the FPGA's, just in case */
+ fpga_do_stop(priv);
+
+ /* Register the miscdevice */
+ ret = misc_register(&priv->miscdev);
+ if (ret) {
+ dev_err(&op->dev, "Unable to register miscdevice\n");
+ goto out_free_irq;
+ }
+
+ /* Create the sysfs files */
+ this_device = priv->miscdev.this_device;
+ dev_set_drvdata(this_device, priv);
+ ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
+ if (ret) {
+ dev_err(&op->dev, "Unable to create sysfs files\n");
+ goto out_misc_deregister;
+ }
+
+ dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
+ (ver & (1 << 17)) ? "Correlator" : "Digitizer",
+ (ver & (1 << 16)) ? "B" : "A",
+ (ver & (1 << 18)) ? "EP2S130" : "EP2S90");
+
+ return 0;
+
+out_misc_deregister:
+ misc_deregister(&priv->miscdev);
+out_free_irq:
+ free_irq(priv->irq, priv);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(priv->irq);
+out_unmap_immr:
+ iounmap(priv->immr);
+out_unmap_regs:
+ iounmap(priv->regs);
+out_dma_release_channel:
+ dma_release_channel(priv->chan);
+out_free_priv:
+ kref_put(&priv->ref, fpga_dev_remove);
+out_return:
+ return ret;
+}
+
+static struct of_device_id fpga_of_match[] = {
+ { .compatible = "carma,fpga-programmer", },
+ {},
+};
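+
+/*
+ * Matching device tree node, as a sketch (the reg and interrupts values
+ * are board specific and elided here):
+ *
+ *	fpga-programmer {
+ *		compatible = "carma,fpga-programmer";
+ *		reg = <...>;
+ *		interrupts = <...>;
+ *	};
+ */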
+
+static struct platform_driver fpga_of_driver = {
+ .probe = fpga_of_probe,
+ .remove = fpga_of_remove,
+ .driver = {
+ .name = drv_name,
+ .of_match_table = fpga_of_match,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * Module Init / Exit
+ */
+
+static int __init fpga_init(void)
+{
+ led_trigger_register_simple("fpga", &ledtrig_fpga);
+ return platform_driver_register(&fpga_of_driver);
+}
+
+static void __exit fpga_exit(void)
+{
+ platform_driver_unregister(&fpga_of_driver);
+ led_trigger_unregister_simple(ledtrig_fpga);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
+MODULE_LICENSE("GPL");
+
+module_init(fpga_init);
+module_exit(fpga_exit);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
new file mode 100644
index 00000000..8c279da0
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga.c
@@ -0,0 +1,1447 @@
+/*
+ * CARMA DATA-FPGA Access Driver
+ *
+ * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * FPGA Memory Dump Format
+ *
+ * FPGA #0 control registers (32 x 32-bit words)
+ * FPGA #1 control registers (32 x 32-bit words)
+ * FPGA #2 control registers (32 x 32-bit words)
+ * FPGA #3 control registers (32 x 32-bit words)
+ * SYSFPGA control registers (32 x 32-bit words)
+ * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
+ * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
+ * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
+ * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
+ *
+ * Each correlation array consists of:
+ *
+ * Correlation Data (2 x NUM_LAGSn x 32-bit words)
+ * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
+ * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
+ *
+ * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
+ * the FPGA configuration registers. They do not change once the FPGA's
+ * have been programmed; they only change on re-programming.
+ */
+
+/*
+ * Basic Description:
+ *
+ * This driver is used to capture correlation spectra off of the four data
+ * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore
+ * this driver supports dynamic enable/disable of capture while the device
+ * remains open.
+ *
+ * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
+ * capture rate, all buffers are pre-allocated to avoid any potentially long
+ * running memory allocations while capturing.
+ *
+ * There are two lists and one pointer which are used to keep track of the
+ * different states of data buffers.
+ *
+ * 1) free list
+ * This list holds all empty data buffers which are ready to receive data.
+ *
+ * 2) inflight pointer
+ * This pointer holds the buffer currently in flight, i.e. the buffer the
+ * DMA engine is copying data into.
+ *
+ * 3) used list
+ * This list holds data buffers which have been filled, and are waiting to be
+ * read by userspace.
+ *
+ * All buffers start life on the free list, then move successively to the
+ * inflight pointer, and then to the used list. After they have been read by
+ * userspace, they are moved back to the free list. The cycle repeats as long
+ * as necessary.
+ *
+ * It should be noted that all buffers are mapped and ready for DMA when they
+ * are on any of the three lists. They are only unmapped when they are in the
+ * process of being read by userspace.
+ */
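+
+/*
+ * In short, each buffer cycles through:
+ *
+ *	free list -> inflight pointer -> used list -> (userspace read)
+ *	     ^                                              |
+ *	     +----------------------------------------------+
+ */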
+
+/*
+ * Notes on the IRQ masking scheme:
+ *
+ * The IRQ masking scheme here is different than most other hardware. The only
+ * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
+ * the data is if the status registers are not cleared before the next
+ * correlation data dump is ready.
+ *
+ * The interrupt line is connected to the status registers, such that when they
+ * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
+ * to schedule a long-running DMA operation and return from the interrupt
+ * handler quickly, but we cannot clear the status registers.
+ *
+ * To handle this, the system controller FPGA has the capability to connect the
+ * interrupt line to a user-controlled GPIO pin. This pin is driven high
+ * (unasserted) and left that way. To mask the interrupt, we change the
+ * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
+ */
+
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/seq_file.h>
+#include <linux/highmem.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/io.h>
+
+#include <media/videobuf-dma-sg.h>
+
+/* system controller registers */
+#define SYS_IRQ_SOURCE_CTL 0x24
+#define SYS_IRQ_OUTPUT_EN 0x28
+#define SYS_IRQ_OUTPUT_DATA 0x2C
+#define SYS_IRQ_INPUT_DATA 0x30
+#define SYS_FPGA_CONFIG_STATUS 0x44
+
+/* GPIO IRQ line assignment */
+#define IRQ_CORL_DONE 0x10
+
+/* FPGA registers */
+#define MMAP_REG_VERSION 0x00
+#define MMAP_REG_CORL_CONF1 0x08
+#define MMAP_REG_CORL_CONF2 0x0C
+#define MMAP_REG_STATUS 0x48
+
+#define SYS_FPGA_BLOCK 0xF0000000
+
+#define DATA_FPGA_START 0x400000
+#define DATA_FPGA_SIZE 0x80000
+
+static const char drv_name[] = "carma-fpga";
+
+#define NUM_FPGA 4
+
+#define MIN_DATA_BUFS 8
+#define MAX_DATA_BUFS 64
+
+struct fpga_info {
+ unsigned int num_lag_ram;
+ unsigned int blk_size;
+};
+
+struct data_buf {
+ struct list_head entry;
+ struct videobuf_dmabuf vb;
+ size_t size;
+};
+
+struct fpga_device {
+ /* character device */
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct mutex mutex;
+
+ /* reference count */
+ struct kref ref;
+
+ /* FPGA registers and information */
+ struct fpga_info info[NUM_FPGA];
+ void __iomem *regs;
+ int irq;
+
+ /* FPGA Physical Address/Size Information */
+ resource_size_t phys_addr;
+ size_t phys_size;
+
+ /* DMA structures */
+ struct sg_table corl_table;
+ unsigned int corl_nents;
+ struct dma_chan *chan;
+
+ /* Protection for all members below */
+ spinlock_t lock;
+
+ /* Device enable/disable flag */
+ bool enabled;
+
+ /* Correlation data buffers */
+ wait_queue_head_t wait;
+ struct list_head free;
+ struct list_head used;
+ struct data_buf *inflight;
+
+ /* Information about data buffers */
+ unsigned int num_dropped;
+ unsigned int num_buffers;
+ size_t bufsize;
+ struct dentry *dbg_entry;
+};
+
+struct fpga_reader {
+ struct fpga_device *priv;
+ struct data_buf *buf;
+ off_t buf_start;
+};
+
+static void fpga_device_release(struct kref *ref)
+{
+ struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
+
+ /* the last reader has exited, cleanup the last bits */
+ mutex_destroy(&priv->mutex);
+ kfree(priv);
+}
+
+/*
+ * Data Buffer Allocation Helpers
+ */
+
+/**
+ * data_free_buffer() - free a single data buffer and all allocated memory
+ * @buf: the buffer to free
+ *
+ * This will free all of the pages allocated to the given data buffer, and
+ * then free the structure itself
+ */
+static void data_free_buffer(struct data_buf *buf)
+{
+ /* It is ok to free a NULL buffer */
+ if (!buf)
+ return;
+
+ /* free all memory */
+ videobuf_dma_free(&buf->vb);
+ kfree(buf);
+}
+
+/**
+ * data_alloc_buffer() - allocate and fill a data buffer with pages
+ * @bytes: the number of bytes required
+ *
+ * This allocates all space needed for a data buffer. It must be mapped before
+ * use in a DMA transaction using videobuf_dma_map().
+ *
+ * Returns NULL on failure
+ */
+static struct data_buf *data_alloc_buffer(const size_t bytes)
+{
+ unsigned int nr_pages;
+ struct data_buf *buf;
+ int ret;
+
+ /* calculate the number of pages necessary */
+ nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+
+ /* allocate the buffer structure */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ goto out_return;
+
+ /* initialize internal fields */
+ INIT_LIST_HEAD(&buf->entry);
+ buf->size = bytes;
+
+ /* allocate the videobuf */
+ videobuf_dma_init(&buf->vb);
+ ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
+ if (ret)
+ goto out_free_buf;
+
+ return buf;
+
+out_free_buf:
+ kfree(buf);
+out_return:
+ return NULL;
+}
+
+/**
+ * data_free_buffers() - free all allocated buffers
+ * @priv: the driver's private data structure
+ *
+ * Free all buffers allocated by the driver (except those currently in the
+ * process of being read by userspace).
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user
+ */
+static void data_free_buffers(struct fpga_device *priv)
+{
+ struct data_buf *buf, *tmp;
+
+ /* the device should be stopped, no DMA in progress */
+ BUG_ON(priv->inflight != NULL);
+
+ list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
+ list_del_init(&buf->entry);
+ videobuf_dma_unmap(priv->dev, &buf->vb);
+ data_free_buffer(buf);
+ }
+
+ list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
+ list_del_init(&buf->entry);
+ videobuf_dma_unmap(priv->dev, &buf->vb);
+ data_free_buffer(buf);
+ }
+
+ priv->num_buffers = 0;
+ priv->bufsize = 0;
+}
+
+/**
+ * data_alloc_buffers() - allocate one second's worth of data buffers
+ * @priv: the driver's private data structure
+ *
+ * Allocate enough buffers for a whole second's worth of data
+ *
+ * This routine degrades gracefully: it succeeds even if a full second's
+ * worth of data buffers could not be allocated, as long as a minimum
+ * number were allocated. In that case, it prints a warning to the
+ * kernel log.
+ *
+ * The device must not be modifying any lists when this is called.
+ *
+ * CONTEXT: user
+ * LOCKING: must hold dev->mutex
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_alloc_buffers(struct fpga_device *priv)
+{
+ struct data_buf *buf;
+ int i, ret;
+
+ for (i = 0; i < MAX_DATA_BUFS; i++) {
+
+ /* allocate a buffer */
+ buf = data_alloc_buffer(priv->bufsize);
+ if (!buf)
+ break;
+
+ /* map it for DMA */
+ ret = videobuf_dma_map(priv->dev, &buf->vb);
+ if (ret) {
+ data_free_buffer(buf);
+ break;
+ }
+
+ /* add it to the list of free buffers */
+ list_add_tail(&buf->entry, &priv->free);
+ priv->num_buffers++;
+ }
+
+ /* Make sure we allocated the minimum required number of buffers */
+ if (priv->num_buffers < MIN_DATA_BUFS) {
+ dev_err(priv->dev, "Unable to allocate enough data buffers\n");
+ data_free_buffers(priv);
+ return -ENOMEM;
+ }
+
+ /* Warn if we are running in a degraded state, but do not fail */
+ if (priv->num_buffers < MAX_DATA_BUFS) {
+ dev_warn(priv->dev,
+ "Unable to allocate %d buffers, using %d buffers instead\n",
+ MAX_DATA_BUFS, i);
+ }
+
+ return 0;
+}
+
+/*
+ * DMA Operations Helpers
+ */
+
+/**
+ * fpga_start_addr() - get the physical address of a DATA-FPGA
+ * @priv: the driver's private data structure
+ * @fpga: the DATA-FPGA number (zero based)
+ */
+static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
+{
+ return priv->phys_addr + DATA_FPGA_START + (DATA_FPGA_SIZE * fpga);
+}
+
+/**
+ * fpga_block_addr() - get the physical address of a correlation data block
+ * @priv: the driver's private data structure
+ * @fpga: the DATA-FPGA number (zero based)
+ * @blknum: the correlation block number (zero based)
+ */
+static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
+ unsigned int blknum)
+{
+ return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
+}
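+
+/*
+ * For example, the first correlation block of FPGA #1 lives at
+ * phys_addr + 0x400000 + 0x80000 + 0x10000, i.e. fpga_block_addr(priv, 1, 0).
+ */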
+
+#define REG_BLOCK_SIZE (32 * 4)
+
+/**
+ * data_setup_corl_table() - create the scatterlist for correlation dumps
+ * @priv: the driver's private data structure
+ *
+ * Create the scatterlist for transferring a correlation dump from the
+ * DATA FPGAs. This structure will be reused for each buffer that needs
+ * to be filled with correlation data.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_setup_corl_table(struct fpga_device *priv)
+{
+ struct sg_table *table = &priv->corl_table;
+ struct scatterlist *sg;
+ struct fpga_info *info;
+ int i, j, ret;
+
+ /* Calculate the number of scatterlist entries needed: one per register
+ * block, plus one per lag RAM block (added in the loop below) */
+ priv->corl_nents = 1 + NUM_FPGA;
+ for (i = 0; i < NUM_FPGA; i++)
+ priv->corl_nents += priv->info[i].num_lag_ram;
+
+ /* Allocate the scatterlist table */
+ ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
+ if (ret) {
+ dev_err(priv->dev, "unable to allocate DMA table\n");
+ return ret;
+ }
+
+ /* Add the DATA FPGA registers to the scatterlist */
+ sg = table->sgl;
+ for (i = 0; i < NUM_FPGA; i++) {
+ sg_dma_address(sg) = fpga_start_addr(priv, i);
+ sg_dma_len(sg) = REG_BLOCK_SIZE;
+ sg = sg_next(sg);
+ }
+
+ /* Add the SYS-FPGA registers to the scatterlist */
+ sg_dma_address(sg) = SYS_FPGA_BLOCK;
+ sg_dma_len(sg) = REG_BLOCK_SIZE;
+ sg = sg_next(sg);
+
+ /* Add the FPGA correlation data blocks to the scatterlist */
+ for (i = 0; i < NUM_FPGA; i++) {
+ info = &priv->info[i];
+ for (j = 0; j < info->num_lag_ram; j++) {
+ sg_dma_address(sg) = fpga_block_addr(priv, i, j);
+ sg_dma_len(sg) = info->blk_size;
+ sg = sg_next(sg);
+ }
+ }
+
+ /*
+ * All physical addresses and lengths are present in the structure
+ * now. It can be reused for every FPGA DATA interrupt
+ */
+ return 0;
+}
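+
+/*
+ * For reference, the scatterlist built above describes one complete dump
+ * in the following order, with one entry per register block
+ * (REG_BLOCK_SIZE bytes) and one per lag RAM block (blk_size bytes):
+ *
+ * [FPGA0 regs] ... [FPGA(N-1) regs] [SYS-FPGA regs]
+ * [FPGA0 lag RAMs 0..num_lag_ram-1] ... [FPGA(N-1) lag RAMs]
+ */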
+
+/*
+ * FPGA Register Access Helpers
+ */
+
+static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
+ unsigned int reg, u32 val)
+{
+ const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
+ iowrite32be(val, priv->regs + fpga_start + reg);
+}
+
+static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
+ unsigned int reg)
+{
+ const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
+ return ioread32be(priv->regs + fpga_start + reg);
+}
+
+/**
+ * data_calculate_bufsize() - calculate the data buffer size required
+ * @priv: the driver's private data structure
+ *
+ * Calculate the total buffer size needed to hold a single block
+ * of correlation data
+ *
+ * CONTEXT: user
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_calculate_bufsize(struct fpga_device *priv)
+{
+ u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
+ u32 conf1, conf2, version;
+ u32 num_lag_ram, blk_size;
+ int i;
+
+ /* Each buffer starts with the 5 FPGA register areas */
+ priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
+
+ /* Read and store the configuration data for each FPGA */
+ for (i = 0; i < NUM_FPGA; i++) {
+ version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
+ conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
+ conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
+
+ /* minor version 2 and later */
+ if ((version & 0x000000FF) >= 2) {
+ num_corl = (conf1 & 0x000000F0) >> 4;
+ num_pack = (conf1 & 0x00000F00) >> 8;
+ num_lags = (conf1 & 0x00FFF000) >> 12;
+ num_meta = (conf1 & 0x7F000000) >> 24;
+ num_qcnt = (conf2 & 0x00000FFF) >> 0;
+ } else {
+ num_corl = (conf1 & 0x000000F0) >> 4;
+ num_pack = 1; /* implied */
+ num_lags = (conf1 & 0x000FFF00) >> 8;
+ num_meta = (conf1 & 0x7FF00000) >> 20;
+ num_qcnt = (conf2 & 0x00000FFF) >> 0;
+ }
+
+ num_lag_ram = (num_corl + num_pack - 1) / num_pack;
+ blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
+
+ priv->info[i].num_lag_ram = num_lag_ram;
+ priv->info[i].blk_size = blk_size;
+ priv->bufsize += num_lag_ram * blk_size;
+
+ dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
+ dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
+ dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
+ dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
+ dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
+ dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
+ }
+
+ dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
+ return 0;
+}
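+
+/*
+ * A hypothetical example of the math above (register values invented for
+ * illustration, not read from real firmware): with NUM_CORL=8, NUM_PACK=2,
+ * NUM_LAGS=32, NUM_META=8 and NUM_QCNT=16, each FPGA contributes
+ * num_lag_ram = (8 + 2 - 1) / 2 = 4 lag RAMs of
+ * blk_size = ((2 * 32) + 8 + 16) * 8 = 704 bytes, i.e. 2816 bytes on top
+ * of the (1 + NUM_FPGA) * REG_BLOCK_SIZE bytes of register space.
+ */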
+
+/*
+ * Interrupt Handling
+ */
+
+/**
+ * data_disable_interrupts() - stop the device from generating interrupts
+ * @priv: the driver's private data structure
+ *
+ * Hide interrupts by switching to GPIO interrupt source
+ *
+ * LOCKING: must hold dev->lock
+ */
+static void data_disable_interrupts(struct fpga_device *priv)
+{
+ /* hide the interrupt by switching the IRQ driver to GPIO */
+ iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
+}
+
+/**
+ * data_enable_interrupts() - allow the device to generate interrupts
+ * @priv: the driver's private data structure
+ *
+ * Unhide interrupts by switching to the FPGA interrupt source. At the
+ * same time, clear the DATA-FPGA status registers.
+ *
+ * LOCKING: must hold dev->lock
+ */
+static void data_enable_interrupts(struct fpga_device *priv)
+{
+ /* clear the actual FPGA corl_done interrupt */
+ fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
+ fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
+ fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
+ fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
+
+ /* read back to flush the posted writes before re-enabling the IRQ source */
+ fpga_read_reg(priv, 0, MMAP_REG_STATUS);
+ fpga_read_reg(priv, 1, MMAP_REG_STATUS);
+ fpga_read_reg(priv, 2, MMAP_REG_STATUS);
+ fpga_read_reg(priv, 3, MMAP_REG_STATUS);
+
+ /* switch back to the external interrupt source */
+ iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
+}
+
+/**
+ * data_dma_cb() - DMAEngine callback for DMA completion
+ * @data: the driver's private data structure
+ *
+ * Complete a DMA transfer from the DATA-FPGAs
+ *
+ * This is called via the DMA callback mechanism. It moves the completed
+ * DMA transaction to the used list, then wakes any processes waiting for
+ * new data.
+ *
+ * CONTEXT: any, softirq expected
+ */
+static void data_dma_cb(void *data)
+{
+ struct fpga_device *priv = data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* If there is no inflight buffer, we've got a bug */
+ BUG_ON(priv->inflight == NULL);
+
+ /* Move the inflight buffer onto the used list */
+ list_move_tail(&priv->inflight->entry, &priv->used);
+ priv->inflight = NULL;
+
+ /*
+ * If data dumping is still enabled, then clear the FPGA
+ * status registers and re-enable FPGA interrupts
+ */
+ if (priv->enabled)
+ data_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * We've changed both the inflight and used lists, so we need
+ * to wake up any processes that are blocking for those events
+ */
+ wake_up(&priv->wait);
+}
+
+/**
+ * data_submit_dma() - prepare and submit the required DMA to fill a buffer
+ * @priv: the driver's private data structure
+ * @buf: the data buffer
+ *
+ * Prepare and submit the necessary DMA transactions to fill a correlation
+ * data buffer.
+ *
+ * LOCKING: must hold dev->lock
+ * CONTEXT: hardirq only
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
+{
+ struct scatterlist *dst_sg, *src_sg;
+ unsigned int dst_nents, src_nents;
+ struct dma_chan *chan = priv->chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ dma_addr_t dst, src;
+
+ dst_sg = buf->vb.sglist;
+ dst_nents = buf->vb.sglen;
+
+ src_sg = priv->corl_table.sgl;
+ src_nents = priv->corl_nents;
+
+ /*
+ * All buffers passed to this function should be ready and mapped
+ * for DMA already. Therefore, we don't need to do anything except
+ * submit it to the Freescale DMA Engine for processing
+ */
+
+ /* setup the scatterlist to scatterlist transfer */
+ tx = chan->device->device_prep_dma_sg(chan,
+ dst_sg, dst_nents,
+ src_sg, src_nents,
+ 0);
+ if (!tx) {
+ dev_err(priv->dev, "unable to prep scatterlist DMA\n");
+ return -ENOMEM;
+ }
+
+ /* submit the transaction to the DMA controller */
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(priv->dev, "unable to submit scatterlist DMA\n");
+ return -ENOMEM;
+ }
+
+ /* Prepare the re-read of the SYS-FPGA block */
+ dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
+ src = SYS_FPGA_BLOCK;
+ tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
+ REG_BLOCK_SIZE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the callback */
+ tx->callback = data_dma_cb;
+ tx->callback_param = priv;
+
+ /* submit the transaction to the DMA controller */
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
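+
+/*
+ * Note that only the final memcpy descriptor above is prepared with
+ * DMA_PREP_INTERRUPT and a callback, so data_dma_cb() runs exactly once
+ * per buffer, after the re-read of the SYS-FPGA block. This relies on the
+ * channel completing descriptors in submission order, which is the
+ * behaviour this driver assumes of the Freescale DMA engine.
+ */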
+
+#define CORL_DONE 0x1
+#define CORL_ERR 0x2
+
+static irqreturn_t data_irq(int irq, void *dev_id)
+{
+ struct fpga_device *priv = dev_id;
+ bool submitted = false;
+ struct data_buf *buf;
+ u32 status;
+ int i;
+
+ /* detect spurious interrupts via FPGA status */
+ for (i = 0; i < 4; i++) {
+ status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
+ if (!(status & (CORL_DONE | CORL_ERR))) {
+ dev_err(priv->dev, "spurious irq detected (FPGA)\n");
+ return IRQ_NONE;
+ }
+ }
+
+ /* detect spurious interrupts via raw IRQ pin readback */
+ status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
+ if (status & IRQ_CORL_DONE) {
+ dev_err(priv->dev, "spurious irq detected (IRQ)\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock(&priv->lock);
+
+ /*
+ * This is an error case that should never happen.
+ *
+ * If this driver has a bug and manages to re-enable interrupts while
+ * a DMA is in progress, then we will hit this statement and should
+ * start paying attention immediately.
+ */
+ BUG_ON(priv->inflight != NULL);
+
+ /* hide the interrupt by switching the IRQ driver to GPIO */
+ data_disable_interrupts(priv);
+
+ /* If there are no free buffers, drop this data */
+ if (list_empty(&priv->free)) {
+ priv->num_dropped++;
+ goto out;
+ }
+
+ buf = list_first_entry(&priv->free, struct data_buf, entry);
+ list_del_init(&buf->entry);
+ BUG_ON(buf->size != priv->bufsize);
+
+ /* Submit a DMA transfer to get the correlation data */
+ if (data_submit_dma(priv, buf)) {
+ dev_err(priv->dev, "Unable to setup DMA transfer\n");
+ list_move_tail(&buf->entry, &priv->free);
+ goto out;
+ }
+
+ /* Save the buffer for the DMA callback */
+ priv->inflight = buf;
+ submitted = true;
+
+ /* Start the DMA Engine */
+ dma_async_memcpy_issue_pending(priv->chan);
+
+out:
+ /* If no DMA was submitted, re-enable interrupts */
+ if (!submitted)
+ data_enable_interrupts(priv);
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Realtime Device Enable Helpers
+ */
+
+/**
+ * data_device_enable() - enable the device for buffered dumping
+ * @priv: the driver's private data structure
+ *
+ * Enable the device for buffered dumping. Allocates buffers and hooks up
+ * the interrupt handler. When this finishes, data will come pouring in.
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user context only
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_device_enable(struct fpga_device *priv)
+{
+ bool enabled;
+ u32 val;
+ int ret;
+
+ /* multiple enables are safe: they do nothing */
+ spin_lock_irq(&priv->lock);
+ enabled = priv->enabled;
+ spin_unlock_irq(&priv->lock);
+ if (enabled)
+ return 0;
+
+ /* check that the FPGAs are programmed */
+ val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
+ if (!(val & (1 << 18))) {
+ dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
+ return -ENODATA;
+ }
+
+ /* read the FPGAs to calculate the buffer size */
+ ret = data_calculate_bufsize(priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to calculate buffer size\n");
+ goto out_error;
+ }
+
+ /* allocate the correlation data buffers */
+ ret = data_alloc_buffers(priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to allocate buffers\n");
+ goto out_error;
+ }
+
+ /* setup the source scatterlist for dumping correlation data */
+ ret = data_setup_corl_table(priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to setup correlation DMA table\n");
+ goto out_error;
+ }
+
+ /* prevent the FPGAs from generating interrupts */
+ data_disable_interrupts(priv);
+
+ /* hookup the irq handler */
+ ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to request IRQ handler\n");
+ goto out_error;
+ }
+
+ /* allow the DMA callback to re-enable FPGA interrupts */
+ spin_lock_irq(&priv->lock);
+ priv->enabled = true;
+ spin_unlock_irq(&priv->lock);
+
+ /* allow the FPGAs to generate interrupts */
+ data_enable_interrupts(priv);
+ return 0;
+
+out_error:
+ sg_free_table(&priv->corl_table);
+ priv->corl_nents = 0;
+
+ data_free_buffers(priv);
+ return ret;
+}
+
+/**
+ * data_device_disable() - disable the device for buffered dumping
+ * @priv: the driver's private data structure
+ *
+ * Disable the device for buffered dumping. Stops new DMA transactions from
+ * being generated, waits for all outstanding DMA to complete, and then frees
+ * all buffers.
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user only
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_device_disable(struct fpga_device *priv)
+{
+ spin_lock_irq(&priv->lock);
+
+ /* allow multiple disable */
+ if (!priv->enabled) {
+ spin_unlock_irq(&priv->lock);
+ return 0;
+ }
+
+ /*
+ * Mark the device disabled
+ *
+ * This stops DMA callbacks from re-enabling interrupts
+ */
+ priv->enabled = false;
+
+ /* prevent the FPGAs from generating interrupts */
+ data_disable_interrupts(priv);
+
+ /* wait until all ongoing DMA has finished */
+ while (priv->inflight != NULL) {
+ spin_unlock_irq(&priv->lock);
+ wait_event(priv->wait, priv->inflight == NULL);
+ spin_lock_irq(&priv->lock);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ /* unhook the irq handler */
+ free_irq(priv->irq, priv);
+
+ /* free the correlation table */
+ sg_free_table(&priv->corl_table);
+ priv->corl_nents = 0;
+
+ /* free all buffers: the free and used lists are not being changed */
+ data_free_buffers(priv);
+ return 0;
+}
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Count the number of entries in the given list
+ */
+static unsigned int list_num_entries(struct list_head *list)
+{
+ struct list_head *entry;
+ unsigned int ret = 0;
+
+ list_for_each(entry, list)
+ ret++;
+
+ return ret;
+}
+
+static int data_debug_show(struct seq_file *f, void *offset)
+{
+ struct fpga_device *priv = f->private;
+
+ spin_lock_irq(&priv->lock);
+
+ seq_printf(f, "enabled: %d\n", priv->enabled);
+ seq_printf(f, "bufsize: %d\n", priv->bufsize);
+ seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
+ seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
+ seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
+ seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
+ seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
+
+ spin_unlock_irq(&priv->lock);
+ return 0;
+}
+
+static int data_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, data_debug_show, inode->i_private);
+}
+
+static const struct file_operations data_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = data_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int data_debugfs_init(struct fpga_device *priv)
+{
+ priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
+ &data_debug_fops);
+ if (IS_ERR(priv->dbg_entry))
+ return PTR_ERR(priv->dbg_entry);
+
+ return 0;
+}
+
+static void data_debugfs_exit(struct fpga_device *priv)
+{
+ debugfs_remove(priv->dbg_entry);
+}
+
+#else
+
+static inline int data_debugfs_init(struct fpga_device *priv)
+{
+ return 0;
+}
+
+static inline void data_debugfs_exit(struct fpga_device *priv)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * SYSFS Attributes
+ */
+
+static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_device *priv = dev_get_drvdata(dev);
+ int ret;
+
+ spin_lock_irq(&priv->lock);
+ ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_device *priv = dev_get_drvdata(dev);
+ unsigned long enable;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &enable);
+ if (ret) {
+ dev_err(priv->dev, "unable to parse enable input\n");
+ return -EINVAL;
+ }
+
+ /* protect against concurrent enable/disable */
+ ret = mutex_lock_interruptible(&priv->mutex);
+ if (ret)
+ return ret;
+
+ if (enable)
+ ret = data_device_enable(priv);
+ else
+ ret = data_device_disable(priv);
+
+ if (ret) {
+ dev_err(priv->dev, "device %s failed\n",
+ enable ? "enable" : "disable");
+ count = ret;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+ return count;
+}
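+
+/*
+ * Usage sketch (the exact path depends on the miscdevice name taken from
+ * drv_name, so treat this as an example rather than a guarantee):
+ *
+ * echo 1 > /sys/class/misc/<drv_name>/enable # start buffered dumping
+ * echo 0 > /sys/class/misc/<drv_name>/enable # stop and free all buffers
+ */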
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);
+
+static struct attribute *data_sysfs_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group rt_sysfs_attr_group = {
+ .attrs = data_sysfs_attrs,
+};
+
+/*
+ * FPGA Realtime Data Character Device
+ */
+
+static int data_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The miscdevice layer puts our struct miscdevice into the
+ * filp->private_data field. We use this to find our private
+ * data and then overwrite it with our own private structure.
+ */
+ struct fpga_device *priv = container_of(filp->private_data,
+ struct fpga_device, miscdev);
+ struct fpga_reader *reader;
+ int ret;
+
+ /* allocate private data */
+ reader = kzalloc(sizeof(*reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->priv = priv;
+ reader->buf = NULL;
+
+ filp->private_data = reader;
+ ret = nonseekable_open(inode, filp);
+ if (ret) {
+ dev_err(priv->dev, "nonseekable-open failed\n");
+ kfree(reader);
+ return ret;
+ }
+
+ /*
+ * success, increase the reference count of the private data structure
+ * so that it doesn't disappear if the device is unbound
+ */
+ kref_get(&priv->ref);
+ return 0;
+}
+
+static int data_release(struct inode *inode, struct file *filp)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+
+ /* free the per-reader structure */
+ data_free_buffer(reader->buf);
+ kfree(reader);
+ filp->private_data = NULL;
+
+ /* decrement our reference count to the private data */
+ kref_put(&priv->ref, fpga_device_release);
+ return 0;
+}
+
+static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *f_pos)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+ struct list_head *used = &priv->used;
+ bool drop_buffer = false;
+ struct data_buf *dbuf;
+ size_t avail;
+ void *data;
+ int ret;
+
+ /* check if we already have a partial buffer */
+ if (reader->buf) {
+ dbuf = reader->buf;
+ goto have_buffer;
+ }
+
+ spin_lock_irq(&priv->lock);
+
+ /* Block until there is at least one buffer on the used list */
+ while (list_empty(used)) {
+ spin_unlock_irq(&priv->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(priv->wait, !list_empty(used));
+ if (ret)
+ return ret;
+
+ spin_lock_irq(&priv->lock);
+ }
+
+ /* Grab the first buffer off of the used list */
+ dbuf = list_first_entry(used, struct data_buf, entry);
+ list_del_init(&dbuf->entry);
+
+ spin_unlock_irq(&priv->lock);
+
+ /* Buffers are always mapped: unmap it */
+ videobuf_dma_unmap(priv->dev, &dbuf->vb);
+
+ /* save the buffer for later */
+ reader->buf = dbuf;
+ reader->buf_start = 0;
+
+have_buffer:
+ /* Get the number of bytes available */
+ avail = dbuf->size - reader->buf_start;
+ data = dbuf->vb.vaddr + reader->buf_start;
+
+ /* Get the number of bytes we can transfer */
+ count = min(count, avail);
+
+ /* Copy the data to the userspace buffer */
+ if (copy_to_user(ubuf, data, count))
+ return -EFAULT;
+
+ /* Update the amount of available space */
+ avail -= count;
+
+ /*
+ * If there is still some data available, save the buffer for the
+ * next userspace call to read() and return
+ */
+ if (avail > 0) {
+ reader->buf_start += count;
+ reader->buf = dbuf;
+ return count;
+ }
+
+ /*
+ * Get the buffer ready to be reused for DMA
+ *
+ * If it fails, we pretend that the read never happened and return
+ * -EFAULT to userspace. The read will be retried.
+ */
+ ret = videobuf_dma_map(priv->dev, &dbuf->vb);
+ if (ret) {
+ dev_err(priv->dev, "unable to remap buffer for DMA\n");
+ return -EFAULT;
+ }
+
+ /* Lock against concurrent enable/disable */
+ spin_lock_irq(&priv->lock);
+
+ /* the reader is finished with this buffer */
+ reader->buf = NULL;
+
+ /*
+ * One of two things has happened: the device is disabled, or the
+ * device has been reconfigured underneath us. In either case, we
+ * should just throw away the buffer.
+ *
+ * Lockdep complains if this is done under the spinlock, so we
+ * handle it during the unlock path.
+ */
+ if (!priv->enabled || dbuf->size != priv->bufsize) {
+ drop_buffer = true;
+ goto out_unlock;
+ }
+
+ /* The buffer is safe to reuse, so add it back to the free list */
+ list_add_tail(&dbuf->entry, &priv->free);
+
+out_unlock:
+ spin_unlock_irq(&priv->lock);
+
+ if (drop_buffer) {
+ videobuf_dma_unmap(priv->dev, &dbuf->vb);
+ data_free_buffer(dbuf);
+ }
+
+ return count;
+}
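+
+/*
+ * A made-up example of the partial-read handling above: with a 2816 byte
+ * buffer, three read() calls of 1024 bytes return 1024, 1024 and 768
+ * bytes; only after the third call is the buffer remapped for DMA and
+ * placed back on the free list (or dropped, if the device was disabled
+ * or reconfigured in the meantime).
+ */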
+
+static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &priv->wait, tbl);
+
+ if (!list_empty(&priv->used))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static int data_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+ unsigned long offset, vsize, psize, addr;
+
+ /* VMA properties */
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ vsize = vma->vm_end - vma->vm_start;
+ psize = priv->phys_size - offset;
+ addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
+
+ /* Check against the FPGA region's physical memory size */
+ if (vsize > psize) {
+ dev_err(priv->dev, "requested mmap mapping too large\n");
+ return -EINVAL;
+ }
+
+ /* IO memory (disable caching) */
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
+ vma->vm_page_prot);
+}
+
+static const struct file_operations data_fops = {
+ .owner = THIS_MODULE,
+ .open = data_open,
+ .release = data_release,
+ .read = data_read,
+ .poll = data_poll,
+ .mmap = data_mmap,
+ .llseek = no_llseek,
+};
+
+/*
+ * OpenFirmware Device Subsystem
+ */
+
+static bool dma_filter(struct dma_chan *chan, void *data)
+{
+ /*
+ * DMA Channel #0 is used for the FPGA Programmer, so ignore it
+ *
+ * This probably won't survive an unload/load cycle of the Freescale
+ * DMAEngine driver, but that won't be a problem
+ */
+ if (chan->chan_id == 0 && chan->device->dev_id == 0)
+ return false;
+
+ return true;
+}
+
+static int data_of_probe(struct platform_device *op)
+{
+ struct device_node *of_node = op->dev.of_node;
+ struct device *this_device;
+ struct fpga_device *priv;
+ struct resource res;
+ dma_cap_mask_t mask;
+ int ret;
+
+ /* Allocate private data */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&op->dev, "Unable to allocate device private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ dev_set_drvdata(&op->dev, priv);
+ priv->dev = &op->dev;
+ kref_init(&priv->ref);
+ mutex_init(&priv->mutex);
+
+ spin_lock_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->free);
+ INIT_LIST_HEAD(&priv->used);
+ init_waitqueue_head(&priv->wait);
+
+ /* Setup the misc device */
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = drv_name;
+ priv->miscdev.fops = &data_fops;
+
+ /* Get the physical address of the FPGA registers */
+ ret = of_address_to_resource(of_node, 0, &res);
+ if (ret) {
+ dev_err(&op->dev, "Unable to find FPGA physical address\n");
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
+ priv->phys_addr = res.start;
+ priv->phys_size = resource_size(&res);
+
+ /* ioremap the registers for use */
+ priv->regs = of_iomap(of_node, 0);
+ if (!priv->regs) {
+ dev_err(&op->dev, "Unable to ioremap registers\n");
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(DMA_INTERRUPT, mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_SG, mask);
+
+ /* Request a DMA channel */
+ priv->chan = dma_request_channel(mask, dma_filter, NULL);
+ if (!priv->chan) {
+ dev_err(&op->dev, "Unable to request DMA channel\n");
+ ret = -ENODEV;
+ goto out_unmap_regs;
+ }
+
+ /* Find the correct IRQ number */
+ priv->irq = irq_of_parse_and_map(of_node, 0);
+ if (priv->irq == NO_IRQ) {
+ dev_err(&op->dev, "Unable to find IRQ line\n");
+ ret = -ENODEV;
+ goto out_release_dma;
+ }
+
+ /* Drive the GPIO for FPGA IRQ high (no interrupt) */
+ iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);
+
+ /* Register the miscdevice */
+ ret = misc_register(&priv->miscdev);
+ if (ret) {
+ dev_err(&op->dev, "Unable to register miscdevice\n");
+ goto out_irq_dispose_mapping;
+ }
+
+ /* Create the debugfs files */
+ ret = data_debugfs_init(priv);
+ if (ret) {
+ dev_err(&op->dev, "Unable to create debugfs files\n");
+ goto out_misc_deregister;
+ }
+
+ /* Create the sysfs files */
+ this_device = priv->miscdev.this_device;
+ dev_set_drvdata(this_device, priv);
+ ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
+ if (ret) {
+ dev_err(&op->dev, "Unable to create sysfs files\n");
+ goto out_data_debugfs_exit;
+ }
+
+ dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
+ return 0;
+
+out_data_debugfs_exit:
+ data_debugfs_exit(priv);
+out_misc_deregister:
+ misc_deregister(&priv->miscdev);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(priv->irq);
+out_release_dma:
+ dma_release_channel(priv->chan);
+out_unmap_regs:
+ iounmap(priv->regs);
+out_free_priv:
+ kref_put(&priv->ref, fpga_device_release);
+out_return:
+ return ret;
+}
+
+static int data_of_remove(struct platform_device *op)
+{
+ struct fpga_device *priv = dev_get_drvdata(&op->dev);
+ struct device *this_device = priv->miscdev.this_device;
+
+ /* remove all sysfs files, now the device cannot be re-enabled */
+ sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);
+
+ /* remove all debugfs files */
+ data_debugfs_exit(priv);
+
+ /* disable the device from generating data */
+ data_device_disable(priv);
+
+ /* remove the character device to stop new readers from appearing */
+ misc_deregister(&priv->miscdev);
+
+ /* cleanup everything not needed by readers */
+ irq_dispose_mapping(priv->irq);
+ dma_release_channel(priv->chan);
+ iounmap(priv->regs);
+
+ /* release our reference */
+ kref_put(&priv->ref, fpga_device_release);
+ return 0;
+}
+
+static struct of_device_id data_of_match[] = {
+ { .compatible = "carma,carma-fpga", },
+ {},
+};
+
+static struct platform_driver data_of_driver = {
+ .probe = data_of_probe,
+ .remove = data_of_remove,
+ .driver = {
+ .name = drv_name,
+ .of_match_table = data_of_match,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(data_of_driver);
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
new file mode 100644
index 00000000..22429b8b
--- /dev/null
+++ b/drivers/misc/cb710/Kconfig
@@ -0,0 +1,25 @@
+config CB710_CORE
+ tristate "ENE CB710/720 Flash memory card reader support"
+ depends on PCI
+ help
+ This option enables support for PCI ENE CB710/720 Flash memory card
+ reader found in some laptops (e.g. some versions of the HP Compaq nx9500).
+
+ You will also have to select some flash card format drivers (MMC/SD,
+ MemoryStick).
+
+ This driver can also be built as a module. If so, the module
+ will be called cb710.
+
+config CB710_DEBUG
+ bool "Enable driver debugging"
+ depends on CB710_CORE != n
+ default n
+ help
+ This is an option for use by developers; most people should
+ say N here. This adds a lot of debugging output to dmesg.
+
+config CB710_DEBUG_ASSUMPTIONS
+ bool
+ depends on CB710_CORE != n
+ default y
diff --git a/drivers/misc/cb710/Makefile b/drivers/misc/cb710/Makefile
new file mode 100644
index 00000000..467c8e9c
--- /dev/null
+++ b/drivers/misc/cb710/Makefile
@@ -0,0 +1,6 @@
+ccflags-$(CONFIG_CB710_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_CB710_CORE) += cb710.o
+
+cb710-y := core.o sgbuf2.o
+cb710-$(CONFIG_CB710_DEBUG) += debug.o
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
new file mode 100644
index 00000000..85cc7710
--- /dev/null
+++ b/drivers/misc/cb710/core.c
@@ -0,0 +1,359 @@
+/*
+ * cb710/core.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/cb710.h>
+#include <linux/gfp.h>
+
+static DEFINE_IDA(cb710_ida);
+static DEFINE_SPINLOCK(cb710_ida_lock);
+
+void cb710_pci_update_config_reg(struct pci_dev *pdev,
+ int reg, uint32_t mask, uint32_t xor)
+{
+ u32 rval;
+
+ pci_read_config_dword(pdev, reg, &rval);
+ rval = (rval & mask) ^ xor;
+ pci_write_config_dword(pdev, reg, rval);
+}
+EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
+
+/* Some magic writes based on Windows driver init code */
+static int __devinit cb710_pci_configure(struct pci_dev *pdev)
+{
+ unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ struct pci_dev *pdev0;
+ u32 val;
+
+ cb710_pci_update_config_reg(pdev, 0x48,
+ ~0x000000FF, 0x0000003F);
+
+ pci_read_config_dword(pdev, 0x48, &val);
+ if (val & 0x80000000)
+ return 0;
+
+ pdev0 = pci_get_slot(pdev->bus, devfn);
+ if (!pdev0)
+ return -ENODEV;
+
+ if (pdev0->vendor == PCI_VENDOR_ID_ENE
+ && pdev0->device == PCI_DEVICE_ID_ENE_720) {
+ cb710_pci_update_config_reg(pdev0, 0x8C,
+ ~0x00F00000, 0x00100000);
+ cb710_pci_update_config_reg(pdev0, 0xB0,
+ ~0x08000000, 0x08000000);
+ }
+
+ cb710_pci_update_config_reg(pdev0, 0x8C,
+ ~0x00000F00, 0x00000200);
+ cb710_pci_update_config_reg(pdev0, 0x90,
+ ~0x00060000, 0x00040000);
+
+ pci_dev_put(pdev0);
+
+ return 0;
+}
+
+static irqreturn_t cb710_irq_handler(int irq, void *data)
+{
+ struct cb710_chip *chip = data;
+ struct cb710_slot *slot = &chip->slot[0];
+ irqreturn_t handled = IRQ_NONE;
+ unsigned nr;
+
+ spin_lock(&chip->irq_lock); /* incl. smp_rmb() */
+
+ for (nr = chip->slots; nr; ++slot, --nr) {
+ cb710_irq_handler_t handler_func = slot->irq_handler;
+ if (handler_func && handler_func(slot))
+ handled = IRQ_HANDLED;
+ }
+
+ spin_unlock(&chip->irq_lock);
+
+ return handled;
+}
+
+static void cb710_release_slot(struct device *dev)
+{
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ struct cb710_slot *slot = cb710_pdev_to_slot(to_platform_device(dev));
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+
+ /* slot struct can be freed now */
+ atomic_dec(&chip->slot_refs_count);
+#endif
+}
+
+static int __devinit cb710_register_slot(struct cb710_chip *chip,
+ unsigned slot_mask, unsigned io_offset, const char *name)
+{
+ int nr = chip->slots;
+ struct cb710_slot *slot = &chip->slot[nr];
+ int err;
+
+ dev_dbg(cb710_chip_dev(chip),
+ "register: %s.%d; slot %d; mask %d; IO offset: 0x%02X\n",
+ name, chip->platform_id, nr, slot_mask, io_offset);
+
+ /* slot->irq_handler == NULL here; this needs to be
+ * seen before platform_device_register() */
+ ++chip->slots;
+ smp_wmb();
+
+ slot->iobase = chip->iobase + io_offset;
+ slot->pdev.name = name;
+ slot->pdev.id = chip->platform_id;
+ slot->pdev.dev.parent = &chip->pdev->dev;
+ slot->pdev.dev.release = cb710_release_slot;
+
+ err = platform_device_register(&slot->pdev);
+
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ atomic_inc(&chip->slot_refs_count);
+#endif
+
+ if (err) {
+ /* device_initialize() called from platform_device_register()
+ * wants this on error path */
+ platform_device_put(&slot->pdev);
+
+ /* slot->irq_handler == NULL here anyway, so no lock needed */
+ --chip->slots;
+ return err;
+ }
+
+ chip->slot_mask |= slot_mask;
+
+ return 0;
+}
+
+static void cb710_unregister_slot(struct cb710_chip *chip,
+ unsigned slot_mask)
+{
+ int nr = chip->slots - 1;
+
+ if (!(chip->slot_mask & slot_mask))
+ return;
+
+ platform_device_unregister(&chip->slot[nr].pdev);
+
+ /* complementary to spin_unlock() in cb710_set_irq_handler() */
+ smp_rmb();
+ BUG_ON(chip->slot[nr].irq_handler != NULL);
+
+ /* slot->irq_handler == NULL here, so no lock needed */
+ --chip->slots;
+ chip->slot_mask &= ~slot_mask;
+}
+
+void cb710_set_irq_handler(struct cb710_slot *slot,
+ cb710_irq_handler_t handler)
+{
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ slot->irq_handler = handler;
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(cb710_set_irq_handler);
+
+#ifdef CONFIG_PM
+
+static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct cb710_chip *chip = pci_get_drvdata(pdev);
+
+ free_irq(pdev->irq, chip);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ if (state.event & PM_EVENT_SLEEP)
+ pci_set_power_state(pdev, PCI_D3cold);
+ return 0;
+}
+
+static int cb710_resume(struct pci_dev *pdev)
+{
+ struct cb710_chip *chip = pci_get_drvdata(pdev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ return devm_request_irq(&pdev->dev, pdev->irq,
+ cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
+}
+
+#endif /* CONFIG_PM */
+
+static int __devinit cb710_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct cb710_chip *chip;
+ unsigned long flags;
+ u32 val;
+ int err;
+ int n = 0;
+
+ err = cb710_pci_configure(pdev);
+ if (err)
+ return err;
+
+ /* this is actually magic... */
+ pci_read_config_dword(pdev, 0x48, &val);
+ if (!(val & 0x80000000)) {
+ pci_write_config_dword(pdev, 0x48, val|0x71000000);
+ pci_read_config_dword(pdev, 0x48, &val);
+ }
+
+ dev_dbg(&pdev->dev, "PCI config[0x48] = 0x%08X\n", val);
+ if (!(val & 0x70000000))
+ return -ENODEV;
+ val = (val >> 28) & 7;
+ if (val & CB710_SLOT_MMC)
+ ++n;
+ if (val & CB710_SLOT_MS)
+ ++n;
+ if (val & CB710_SLOT_SM)
+ ++n;
+
+ chip = devm_kzalloc(&pdev->dev,
+ sizeof(*chip) + n * sizeof(*chip->slot), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ spin_lock_init(&chip->irq_lock);
+ chip->pdev = pdev;
+ chip->iobase = pcim_iomap_table(pdev)[0];
+
+ pci_set_drvdata(pdev, chip);
+
+ err = devm_request_irq(&pdev->dev, pdev->irq,
+ cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (err)
+ return err;
+
+ do {
+ if (!ida_pre_get(&cb710_ida, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock_irqsave(&cb710_ida_lock, flags);
+ err = ida_get_new(&cb710_ida, &chip->platform_id);
+ spin_unlock_irqrestore(&cb710_ida_lock, flags);
+
+ if (err && err != -EAGAIN)
+ return err;
+ } while (err);
+
+
+ dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
+ chip->platform_id, chip->iobase, pdev->irq);
+
+ if (val & CB710_SLOT_MMC) { /* MMC/SD slot */
+ err = cb710_register_slot(chip,
+ CB710_SLOT_MMC, 0x00, "cb710-mmc");
+ if (err)
+ return err;
+ }
+
+ if (val & CB710_SLOT_MS) { /* MemoryStick slot */
+ err = cb710_register_slot(chip,
+ CB710_SLOT_MS, 0x40, "cb710-ms");
+ if (err)
+ goto unreg_mmc;
+ }
+
+ if (val & CB710_SLOT_SM) { /* SmartMedia slot */
+ err = cb710_register_slot(chip,
+ CB710_SLOT_SM, 0x60, "cb710-sm");
+ if (err)
+ goto unreg_ms;
+ }
+
+ return 0;
+unreg_ms:
+ cb710_unregister_slot(chip, CB710_SLOT_MS);
+unreg_mmc:
+ cb710_unregister_slot(chip, CB710_SLOT_MMC);
+
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
+#endif
+ return err;
+}
+
+static void __devexit cb710_remove_one(struct pci_dev *pdev)
+{
+ struct cb710_chip *chip = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ cb710_unregister_slot(chip, CB710_SLOT_SM);
+ cb710_unregister_slot(chip, CB710_SLOT_MS);
+ cb710_unregister_slot(chip, CB710_SLOT_MMC);
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
+#endif
+
+ spin_lock_irqsave(&cb710_ida_lock, flags);
+ ida_remove(&cb710_ida, chip->platform_id);
+ spin_unlock_irqrestore(&cb710_ida_lock, flags);
+}
+
+static const struct pci_device_id cb710_pci_tbl[] = {
+ { PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_CB710_FLASH,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+
+static struct pci_driver cb710_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cb710_pci_tbl,
+ .probe = cb710_probe,
+ .remove = __devexit_p(cb710_remove_one),
+#ifdef CONFIG_PM
+ .suspend = cb710_suspend,
+ .resume = cb710_resume,
+#endif
+};
+
+static int __init cb710_init_module(void)
+{
+ return pci_register_driver(&cb710_driver);
+}
+
+static void __exit cb710_cleanup_module(void)
+{
+ pci_unregister_driver(&cb710_driver);
+ ida_destroy(&cb710_ida);
+}
+
+module_init(cb710_init_module);
+module_exit(cb710_cleanup_module);
+
+MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
+MODULE_DESCRIPTION("ENE CB710 memory card reader driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cb710_pci_tbl);
diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c
new file mode 100644
index 00000000..fcb3b8e3
--- /dev/null
+++ b/drivers/misc/cb710/debug.c
@@ -0,0 +1,118 @@
+/*
+ * cb710/debug.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/cb710.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define CB710_REG_COUNT 0x80
+
+static const u16 allow[CB710_REG_COUNT/16] = {
+ 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
+ 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
+};
+static const char *const prefix[ARRAY_SIZE(allow)] = {
+ "MMC", "MMC", "MMC", "MMC",
+ "MS?", "MS?", "SM?", "SM?"
+};
+
+static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
+{
+ unsigned mask = (1 << bits/8) - 1;
+ offset *= bits/8;
+ return ((allow[block] >> offset) & mask) == mask;
+}
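+
+/*
+ * Each allow[] word is a per-byte readability bitmap for one 16-byte
+ * register block. For example, allow[0] == 0xFFF0 marks bytes 4-15 of
+ * the first MMC block readable: allow_reg_read(0, 1, 32) tests mask 0xF
+ * at bit offset 4 and succeeds, while allow_reg_read(0, 0, 8) tests
+ * bit 0 and fails.
+ */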
+
+#define CB710_READ_REGS_TEMPLATE(t) \
+static void cb710_read_regs_##t(void __iomem *iobase, \
+ u##t *reg, unsigned select) \
+{ \
+ unsigned i, j; \
+ \
+ for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
+ if (!(select & (1 << i))) \
+ continue; \
+ \
+ for (j = 0; j < 0x10/(t/8); ++j) { \
+ if (!allow_reg_read(i, j, t)) \
+ continue; \
+ reg[j] = ioread##t(iobase \
+ + (i << 4) + (j * (t/8))); \
+ } \
+ } \
+}
+
+static const char cb710_regf_8[] = "%02X";
+static const char cb710_regf_16[] = "%04X";
+static const char cb710_regf_32[] = "%08X";
+static const char cb710_xes[] = "xxxxxxxx";
+
+#define CB710_DUMP_REGS_TEMPLATE(t) \
+static void cb710_dump_regs_##t(struct device *dev, \
+ const u##t *reg, unsigned select) \
+{ \
+ const char *const xp = &cb710_xes[8 - t/4]; \
+ const char *const format = cb710_regf_##t; \
+ \
+ char msg[100], *p; \
+ unsigned i, j; \
+ \
+ for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
+ if (!(select & (1 << i))) \
+ continue; \
+ p = msg; \
+ for (j = 0; j < 0x10/(t/8); ++j) { \
+ *p++ = ' '; \
+ if (j == 8/(t/8)) \
+ *p++ = ' '; \
+ if (allow_reg_read(i, j, t)) \
+ p += sprintf(p, format, reg[j]); \
+ else \
+ p += sprintf(p, "%s", xp); \
+ } \
+ dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \
+ } \
+}
+
+#define CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \
+static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \
+ unsigned select) \
+{ \
+ u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \
+ \
+ memset(&regs, 0, sizeof(regs)); \
+ cb710_read_regs_##t(chip->iobase, regs, select); \
+ cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \
+}
+
+#define CB710_REG_ACCESS_TEMPLATES(t) \
+ CB710_READ_REGS_TEMPLATE(t) \
+ CB710_DUMP_REGS_TEMPLATE(t) \
+ CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
+
+CB710_REG_ACCESS_TEMPLATES(8)
+CB710_REG_ACCESS_TEMPLATES(16)
+CB710_REG_ACCESS_TEMPLATES(32)
+
+void cb710_dump_regs(struct cb710_chip *chip, unsigned select)
+{
+ if (!(select & CB710_DUMP_REGS_MASK))
+ select = CB710_DUMP_REGS_ALL;
+ if (!(select & CB710_DUMP_ACCESS_MASK))
+ select |= CB710_DUMP_ACCESS_8;
+
+ if (select & CB710_DUMP_ACCESS_32)
+ cb710_read_and_dump_regs_32(chip, select);
+ if (select & CB710_DUMP_ACCESS_16)
+ cb710_read_and_dump_regs_16(chip, select);
+ if (select & CB710_DUMP_ACCESS_8)
+ cb710_read_and_dump_regs_8(chip, select);
+}
+EXPORT_SYMBOL_GPL(cb710_dump_regs);
+
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
new file mode 100644
index 00000000..2a40d0ef
--- /dev/null
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -0,0 +1,146 @@
+/*
+ * cb710/sgbuf2.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cb710.h>
+
+static bool sg_dwiter_next(struct sg_mapping_iter *miter)
+{
+ if (sg_miter_next(miter)) {
+ miter->consumed = 0;
+ return true;
+ } else
+ return false;
+}
+
+static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter)
+{
+ return miter->length == miter->consumed && !sg_dwiter_next(miter);
+}
+
+static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
+{
+ size_t len, left = 4;
+ uint32_t data;
+ void *addr = &data;
+
+ do {
+ len = min(miter->length - miter->consumed, left);
+ memcpy(addr, miter->addr + miter->consumed, len);
+ miter->consumed += len;
+ left -= len;
+ if (!left)
+ return data;
+ addr += len;
+ } while (sg_dwiter_next(miter));
+
+ memset(addr, 0, left);
+ return data;
+}
+
+static inline bool needs_unaligned_copy(const void *ptr)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ return false;
+#else
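+ /* (ptr - NULL) yields the pointer value as a ptrdiff_t (a GCC
+ * extension); a nonzero result in the low two bits means ptr is
+ * not 32-bit aligned */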
+ return ((ptr - NULL) & 3) != 0;
+#endif
+}
+
+static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr)
+{
+ size_t len;
+
+ if (sg_dwiter_is_at_end(miter))
+ return true;
+
+ len = miter->length - miter->consumed;
+
+ if (likely(len >= 4 && !needs_unaligned_copy(
+ miter->addr + miter->consumed))) {
+ *ptr = miter->addr + miter->consumed;
+ miter->consumed += 4;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer
+ * @miter: sg mapping iterator used for reading
+ *
+ * Description:
+ * Returns 32-bit word starting at byte pointed to by @miter@
+ * handling any alignment issues. Bytes past the buffer's end
+ * are not accessed (read) but are returned as zeroes. @miter@
+ * is advanced by 4 bytes or to the end of buffer whichever is
+ * closer.
+ *
+ * Context:
+ * Same requirements as in sg_miter_next().
+ *
+ * Returns:
+ * 32-bit word just read.
+ */
+uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter)
+{
+ uint32_t *ptr = NULL;
+
+ if (likely(sg_dwiter_get_next_block(miter, &ptr)))
+ return ptr ? *ptr : 0;
+
+ return sg_dwiter_read_buffer(miter);
+}
+EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block);
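+
+/*
+ * An illustration with a made-up buffer (not driver data): reading a
+ * 6-byte buffer split into two 3-byte sg entries yields two words. The
+ * first call takes the slow path and assembles bytes 0-3 across the
+ * entry boundary; the second returns bytes 4-5 with the upper two bytes
+ * zeroed, leaving the iterator at the end of the buffer.
+ */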
+
+static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
+{
+ size_t len, left = 4;
+ void *addr = &data;
+
+ do {
+ len = min(miter->length - miter->consumed, left);
+ memcpy(miter->addr, addr, len);
+ miter->consumed += len;
+ left -= len;
+ if (!left)
+ return;
+ addr += len;
+ } while (sg_dwiter_next(miter));
+}
+
+/**
+ * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
+ * @miter: sg mapping iterator used for writing
+ *
+ * Description:
+ * Writes 32-bit word starting at byte pointed to by @miter@
+ * handling any alignment issues. Bytes which would be written
+ * past the buffer's end are silently discarded. @miter@ is
+ * advanced by 4 bytes or to the end of buffer whichever is closer.
+ *
+ * Context:
+ * Same requirements as in sg_miter_next().
+ */
+void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data)
+{
+ uint32_t *ptr = NULL;
+
+ if (likely(sg_dwiter_get_next_block(miter, &ptr))) {
+ if (ptr)
+ *ptr = data;
+ else
+ return;
+ } else
+ sg_dwiter_write_slow(miter, data);
+}
+EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
+
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
new file mode 100644
index 00000000..f505a40a
--- /dev/null
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -0,0 +1,351 @@
+/*
+ * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT)
+ *
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/cs5535.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "cs5535-mfgpt"
+
+static int mfgpt_reset_timers;
+module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
+MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; "
+ "required by some broken BIOSes (ie, TinyBIOS < 0.99).");
+
+struct cs5535_mfgpt_timer {
+ struct cs5535_mfgpt_chip *chip;
+ int nr;
+};
+
+static struct cs5535_mfgpt_chip {
+ DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
+ resource_size_t base;
+
+ struct platform_device *pdev;
+ spinlock_t lock;
+ int initialized;
+} cs5535_mfgpt_chip;
+
+int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable)
+{
+ uint32_t msr, mask, value, dummy;
+ int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
+
+ if (!timer) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ /*
+ * The register maps for these are described in sections 6.17.1.x of
+ * the AMD Geode CS5536 Companion Device Data Book.
+ */
+ switch (event) {
+ case MFGPT_EVENT_RESET:
+ /*
+ * XXX: According to the docs, we cannot reset timers above
+ * 6; that is, resets for 7 and 8 will be ignored. Is this
+ * a problem? -dilinger
+ */
+ msr = MSR_MFGPT_NR;
+ mask = 1 << (timer->nr + 24);
+ break;
+
+ case MFGPT_EVENT_NMI:
+ msr = MSR_MFGPT_NR;
+ mask = 1 << (timer->nr + shift);
+ break;
+
+ case MFGPT_EVENT_IRQ:
+ msr = MSR_MFGPT_IRQ;
+ mask = 1 << (timer->nr + shift);
+ break;
+
+ default:
+ return -EIO;
+ }
+
+ rdmsr(msr, value, dummy);
+
+ if (enable)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ wrmsr(msr, value, dummy);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event);
+
+int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq,
+ int enable)
+{
+ uint32_t zsel, lpc, dummy;
+ int shift;
+
+ if (!timer) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ /*
+ * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
+ * is using the same CMP of the timer's Siamese twin, the IRQ is set to
+ * 2, and we must not use or change it.
+ * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
+ * IRQ of the 1st. This can only happen when forcing an IRQ; calling this
+ * with *irq==0 is safe. Currently there _are_ no 2 drivers.
+ */
+ rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4;
+ if (((zsel >> shift) & 0xF) == 2)
+ return -EIO;
+
+ /* Choose IRQ: if none supplied, keep IRQ already set or use default */
+ if (!*irq)
+ *irq = (zsel >> shift) & 0xF;
+ if (!*irq)
+ *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+
+ /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
+ if (*irq < 1 || *irq == 2 || *irq > 15)
+ return -EIO;
+ rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
+ if (lpc & (1 << *irq))
+ return -EIO;
+
+ /* All chosen and checked - go for it */
+ if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+ return -EIO;
+ if (enable) {
+ zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
+ wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq);
+
+struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
+{
+ struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip;
+ struct cs5535_mfgpt_timer *timer = NULL;
+ unsigned long flags;
+ int max;
+
+ if (!mfgpt->initialized)
+ goto done;
+
+ /* only allocate timers from the working domain if requested */
+ if (domain == MFGPT_DOMAIN_WORKING)
+ max = 6;
+ else
+ max = MFGPT_MAX_TIMERS;
+
+ if (timer_nr >= max) {
+ /* programmer error. silly programmers! */
+ WARN_ON(1);
+ goto done;
+ }
+
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ if (timer_nr < 0) {
+ unsigned long t;
+
+ /* try to find any available timer */
+ t = find_first_bit(mfgpt->avail, max);
+ /* set timer_nr to -1 if no timers available */
+ timer_nr = t < max ? (int) t : -1;
+ } else {
+ /* check if the requested timer's available */
+ if (!test_bit(timer_nr, mfgpt->avail))
+ timer_nr = -1;
+ }
+
+ if (timer_nr >= 0)
+ /* if timer_nr is not -1, it's an available timer */
+ __clear_bit(timer_nr, mfgpt->avail);
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+
+ if (timer_nr < 0)
+ goto done;
+
+ timer = kmalloc(sizeof(*timer), GFP_KERNEL);
+ if (!timer) {
+ /* aw hell */
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ __set_bit(timer_nr, mfgpt->avail);
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+ goto done;
+ }
+ timer->chip = mfgpt;
+ timer->nr = timer_nr;
+ dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr);
+
+done:
+ return timer;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
+
+/*
+ * XXX: This frees the timer memory, but never resets the actual hardware
+ * timer. The old geode_mfgpt code did this; it would be good to figure
+ * out a way to actually release the hardware timer. See comments below.
+ */
+void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
+{
+ unsigned long flags;
+ uint16_t val;
+
+ /* timer can be made available again only if never set up */
+ val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP);
+ if (!(val & MFGPT_SETUP_SETUP)) {
+ spin_lock_irqsave(&timer->chip->lock, flags);
+ __set_bit(timer->nr, timer->chip->avail);
+ spin_unlock_irqrestore(&timer->chip->lock, flags);
+ }
+
+ kfree(timer);
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
+
+uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg)
+{
+ return inw(timer->chip->base + reg + (timer->nr * 8));
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_read);
+
+void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value)
+{
+ outw(value, timer->chip->base + reg + (timer->nr * 8));
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
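+
+/*
+ * Each timer occupies an 8-byte window in I/O space. For example,
+ * assuming MFGPT_REG_SETUP is 0x06 (as defined in <linux/cs5535.h>),
+ * reading the setup register of timer 3 touches I/O port
+ * base + 0x06 + (3 * 8) = base + 0x1E.
+ */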
+
+/*
+ * This is a sledgehammer that resets all MFGPT timers. This is required by
+ * some broken BIOSes which leave the system in an unstable state
+ * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain as to
+ * whether or not this secret MSR can be used to release individual timers.
+ * Jordan tells me that he and Mitch once played w/ it, but it's unclear
+ * what the results of that were (and they experienced some instability).
+ */
+static void __devinit reset_all_timers(void)
+{
+ uint32_t val, dummy;
+
+ /* The following undocumented bit resets the MFGPT timers */
+ val = 0xFF; dummy = 0;
+ wrmsr(MSR_MFGPT_SETUP, val, dummy);
+}
+
+/*
+ * Check whether any MFGPTs are available for the kernel to use. In most
+ * cases, firmware that uses AMD's VSA code will claim all timers during
+ * bootup; we certainly don't want to take them if they're already in use.
+ * In other cases (such as with VSAless OpenFirmware), the system firmware
+ * leaves timers available for us to use.
+ */
+static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+{
+ struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
+ unsigned long flags;
+ int timers = 0;
+ uint16_t val;
+ int i;
+
+ /* bios workaround */
+ if (mfgpt_reset_timers)
+ reset_all_timers();
+
+ /* just to be safe, protect this section w/ lock */
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
+ timer.nr = i;
+ val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP);
+ if (!(val & MFGPT_SETUP_SETUP)) {
+ __set_bit(i, mfgpt->avail);
+ timers++;
+ }
+ }
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+
+ return timers;
+}
+
+static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int err = -EIO, t;
+
+ /* There are two ways to get the MFGPT base address; one is by
+ * fetching it from MSR_LBAR_MFGPT, the other is by reading the
+ * PCI BAR info. The latter method is easier (especially across
+ * different architectures), so we'll stick with that for now. If
+ * it turns out to be unreliable in the face of crappy BIOSes, we
+ * can always go back to using MSRs. */
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ goto done;
+ }
+
+ if (!request_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "can't request region\n");
+ goto done;
+ }
+
+ /* set up the driver-specific struct */
+ cs5535_mfgpt_chip.base = res->start;
+ cs5535_mfgpt_chip.pdev = pdev;
+ spin_lock_init(&cs5535_mfgpt_chip.lock);
+
+ dev_info(&pdev->dev, "reserved resource region %pR\n", res);
+
+ /* detect the available timers */
+ t = scan_timers(&cs5535_mfgpt_chip);
+ dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
+ cs5535_mfgpt_chip.initialized = 1;
+ return 0;
+
+done:
+ return err;
+}
+
+static struct platform_driver cs5535_mfgpt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = cs5535_mfgpt_probe,
+};
+
+
+static int __init cs5535_mfgpt_init(void)
+{
+ return platform_driver_register(&cs5535_mfgpt_driver);
+}
+
+module_init(cs5535_mfgpt_init);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
+MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
new file mode 100644
index 00000000..154b02e5
--- /dev/null
+++ b/drivers/misc/ds1682.c
@@ -0,0 +1,257 @@
+/*
+ * Dallas Semiconductor DS1682 Elapsed Time Recorder device driver
+ *
+ * Written by: Grant Likely <grant.likely@secretlab.ca>
+ *
+ * Copyright (C) 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The DS1682 elapsed timer recorder is a simple device that implements
+ * one elapsed time counter, one event counter, an alarm signal and 10
+ * bytes of general purpose EEPROM.
+ *
+ * This driver provides access to the DS1682 counters and user data via
+ * sysfs. The following attributes are added to the device node:
+ * elapsed_time (u32): Total elapsed event time in ms resolution
+ * alarm_time (u32): When elapsed time exceeds the value in alarm_time,
+ * then the alarm pin is asserted.
+ * event_count (u16): number of times the event pin has gone low.
+ * eeprom (u8[10]): general purpose EEPROM
+ *
+ * Counter registers and user data are both read/write unless the device
+ * has been write protected. This driver does not support turning on write
+ * protection: once write protection is turned on, it is impossible to
+ * turn it off again, so I have left the feature out of this driver to avoid
+ * accidental enabling, but it is trivial to add write protect support.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/hwmon-sysfs.h>
+
+/* Device registers */
+#define DS1682_REG_CONFIG 0x00
+#define DS1682_REG_ALARM 0x01
+#define DS1682_REG_ELAPSED 0x05
+#define DS1682_REG_EVT_CNTR 0x09
+#define DS1682_REG_EEPROM 0x0b
+#define DS1682_REG_RESET 0x1d
+#define DS1682_REG_WRITE_DISABLE 0x1e
+#define DS1682_REG_WRITE_MEM_DISABLE 0x1f
+
+#define DS1682_EEPROM_SIZE 10
+
+/*
+ * Generic counter attributes
+ */
+static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ __le32 val = 0;
+ int rc;
+
+ dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name);
+
+ /* Read the register */
+ rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr,
+ (u8 *)&val);
+ if (rc < 0)
+ return -EIO;
+
+ /* Special case: the 32 bit regs are time values with 1/4s
+ * resolution, scale them up to milliseconds */
+ if (sattr->nr == 4)
+ return sprintf(buf, "%llu\n",
+ ((unsigned long long)le32_to_cpu(val)) * 250);
+
+ /* Format the output string and return # of bytes */
+ return sprintf(buf, "%li\n", (long)le32_to_cpu(val));
+}
+
+static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ char *endp;
+ u64 val;
+ __le32 val_le;
+ int rc;
+
+ dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
+
+ /* Decode input */
+ val = simple_strtoull(buf, &endp, 0);
+ if (buf == endp) {
+ dev_dbg(dev, "input string not a number\n");
+ return -EINVAL;
+ }
+
+ /* Special case: the 32 bit regs are time values with 1/4s
+ * resolution, scale input down to quarter-seconds */
+ if (sattr->nr == 4)
+ do_div(val, 250);
+
+ /* write out the value */
+ val_le = cpu_to_le32(val);
+ rc = i2c_smbus_write_i2c_block_data(client, sattr->index, sattr->nr,
+ (u8 *)&val_le);
+ if (rc < 0) {
+ dev_err(dev, "register write failed; reg=0x%x, size=%i\n",
+ sattr->index, sattr->nr);
+ return -EIO;
+ }
+
+ return count;
+}
+
+/*
+ * Simple register attributes
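+ *
+ * SENSOR_DEVICE_ATTR_2 packs the register width in bytes into 'nr' and
+ * the register address into 'index'; ds1682_show()/ds1682_store() use
+ * those two values for the SMBus block transfer.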
+ */
+static SENSOR_DEVICE_ATTR_2(elapsed_time, S_IRUGO | S_IWUSR, ds1682_show,
+ ds1682_store, 4, DS1682_REG_ELAPSED);
+static SENSOR_DEVICE_ATTR_2(alarm_time, S_IRUGO | S_IWUSR, ds1682_show,
+ ds1682_store, 4, DS1682_REG_ALARM);
+static SENSOR_DEVICE_ATTR_2(event_count, S_IRUGO | S_IWUSR, ds1682_show,
+ ds1682_store, 2, DS1682_REG_EVT_CNTR);
+
+static const struct attribute_group ds1682_group = {
+ .attrs = (struct attribute *[]) {
+ &sensor_dev_attr_elapsed_time.dev_attr.attr,
+ &sensor_dev_attr_alarm_time.dev_attr.attr,
+ &sensor_dev_attr_event_count.dev_attr.attr,
+ NULL,
+ },
+};
+
+/*
+ * User data attribute
+ */
+static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
+ int rc;
+
+ dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
+ buf, off, count);
+
+ if (off >= DS1682_EEPROM_SIZE)
+ return 0;
+
+ if (off + count > DS1682_EEPROM_SIZE)
+ count = DS1682_EEPROM_SIZE - off;
+
+ rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
+ count, buf);
+ if (rc < 0)
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
+
+ dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
+ buf, off, count);
+
+ if (off >= DS1682_EEPROM_SIZE)
+ return -ENOSPC;
+
+ if (off + count > DS1682_EEPROM_SIZE)
+ count = DS1682_EEPROM_SIZE - off;
+
+ /* Write out to the device */
+ if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
+ count, buf) < 0)
+ return -EIO;
+
+ return count;
+}
+
+static struct bin_attribute ds1682_eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = DS1682_EEPROM_SIZE,
+ .read = ds1682_eeprom_read,
+ .write = ds1682_eeprom_write,
+};
+
+/*
+ * Called when a ds1682 device is matched with this driver
+ */
+static int ds1682_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(&client->dev, "i2c bus does not support the ds1682\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ rc = sysfs_create_group(&client->dev.kobj, &ds1682_group);
+ if (rc)
+ goto exit;
+
+ rc = sysfs_create_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
+ if (rc)
+ goto exit_bin_attr;
+
+ return 0;
+
+ exit_bin_attr:
+ sysfs_remove_group(&client->dev.kobj, &ds1682_group);
+ exit:
+ return rc;
+}
+
+static int ds1682_remove(struct i2c_client *client)
+{
+ sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
+ sysfs_remove_group(&client->dev.kobj, &ds1682_group);
+ return 0;
+}
+
+static const struct i2c_device_id ds1682_id[] = {
+ { "ds1682", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ds1682_id);
+
+static struct i2c_driver ds1682_driver = {
+ .driver = {
+ .name = "ds1682",
+ },
+ .probe = ds1682_probe,
+ .remove = ds1682_remove,
+ .id_table = ds1682_id,
+};
+
+module_i2c_driver(ds1682_driver);
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
new file mode 100644
index 00000000..701edf65
--- /dev/null
+++ b/drivers/misc/eeprom/Kconfig
@@ -0,0 +1,98 @@
+menu "EEPROM support"
+
+config EEPROM_AT24
+ tristate "I2C EEPROMs from most vendors"
+ depends on I2C && SYSFS
+ help
+ Enable this driver to get read/write support to most I2C EEPROMs,
+ after you configure the driver to know about each EEPROM on
+ your target board. Use these generic chip names, instead of
+ vendor-specific ones like at24c64 or 24lc02:
+
+ 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
+ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
+
+ Unless you like data loss puzzles, always be sure that any chip
+ you configure as a 24c32 (32 kbit) or larger is NOT really a
+ 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
+ as read-only won't help recover from this. Also, if your chip
+ has any software write-protect mechanism you may want to review the
+ code to make sure this driver won't turn it on by accident.
+
+ If you use this with an SMBus adapter instead of an I2C adapter,
+ full functionality is not available. Only smaller devices are
+ supported (24c16 and below, max 4 kByte).
+
+ This driver can also be built as a module. If so, the module
+ will be called at24.
+
+config EEPROM_AT25
+ tristate "SPI EEPROMs from most vendors"
+ depends on SPI && SYSFS
+ help
+ Enable this driver to get read/write support to most SPI EEPROMs,
+ after you configure the board init code to know about each eeprom
+ on your target board.
+
+ This driver can also be built as a module. If so, the module
+ will be called at25.
+
+config EEPROM_LEGACY
+ tristate "Old I2C EEPROM reader"
+ depends on I2C && SYSFS
+ help
+ If you say yes here you get read-only access to the EEPROM data
+ available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
+ EEPROMs could theoretically be available on other devices as well.
+
+ This driver can also be built as a module. If so, the module
+ will be called eeprom.
+
+config EEPROM_MAX6875
+ tristate "Maxim MAX6874/5 power supply supervisor"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get read-only support for the user EEPROM of
+ the Maxim MAX6874/5 EEPROM-programmable, quad power-supply
+ sequencer/supervisor.
+
+ All other features of this chip should be accessed via i2c-dev.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6875.
+
+
+config EEPROM_93CX6
+ tristate "EEPROM 93CX6 support"
+ help
+ This is a driver for the EEPROM chipsets 93c46 and 93c66.
+ The driver supports both read as well as write commands.
+
+ If unsure, say N.
+
+config EEPROM_93XX46
+ tristate "Microwire EEPROM 93XX46 support"
+ depends on SPI && SYSFS
+ help
+ Driver for the microwire EEPROM chipsets 93xx46x. The driver
+ supports both read and write commands and also the command to
+ erase the whole EEPROM.
+
+ This driver can also be built as a module. If so, the module
+ will be called eeprom_93xx46.
+
+ If unsure, say N.
+
+config EEPROM_DIGSY_MTC_CFG
+ bool "DigsyMTC display configuration EEPROMs device"
+ depends on GPIO_MPC5200 && SPI_GPIO
+ help
+ This option enables access to the display configuration EEPROMs
+ on the digsy_mtc board. You additionally have to select the
+ Microwire EEPROM 93XX46 driver. sysfs entries will be created
+ for each EEPROM, allowing you to read/write the configuration
+ data or to erase the whole EEPROM.
+
+ If unsure, say N.
+
+endmenu
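
All of these options can be built modular, as the help texts note. An
illustrative .config fragment for a board that only needs the generic I2C
and SPI EEPROM drivers (example selection only):

    CONFIG_EEPROM_AT24=m
    CONFIG_EEPROM_AT25=m
    # CONFIG_EEPROM_LEGACY is not set
    # CONFIG_EEPROM_MAX6875 is not set
    # CONFIG_EEPROM_93CX6 is not set
    # CONFIG_EEPROM_93XX46 is not set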
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
new file mode 100644
index 00000000..fc1e81d2
--- /dev/null
+++ b/drivers/misc/eeprom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_EEPROM_AT24) += at24.o
+obj-$(CONFIG_EEPROM_AT25) += at25.o
+obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
+obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
+obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
+obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
+obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
new file mode 100644
index 00000000..ab1ad417
--- /dev/null
+++ b/drivers/misc/eeprom/at24.c
@@ -0,0 +1,707 @@
+/*
+ * at24.c - handle most I2C EEPROMs
+ *
+ * Copyright (C) 2005-2007 David Brownell
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/mod_devicetable.h>
+#include <linux/log2.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+
+/*
+ * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
+ * Differences between different vendor product lines (like Atmel AT24C or
+ * Microchip 24LC, etc) won't much matter for typical read/write access.
+ * There are also I2C RAM chips, likewise interchangeable. One example
+ * would be the PCF8570, which acts like a 24c02 EEPROM (256 bytes).
+ *
+ * However, misconfiguration can lose data. Sending a 16-bit memory address
+ * to a part with 8-bit addressing will overwrite data. Writing with too
+ * big a page size also loses data. And it's not safe to assume that the
+ * conventional addresses 0x50..0x57 only hold eeproms; a PCF8563 RTC
+ * uses 0x51, for just one example.
+ *
+ * Accordingly, explicit board-specific configuration data should be used
+ * in almost all cases. (One partial exception is an SMBus used to access
+ * "SPD" data for DRAM sticks. Those only use 24c02 EEPROMs.)
+ *
+ * So this driver uses "new style" I2C driver binding, expecting to be
+ * told what devices exist. That may be in arch/X/mach-Y/board-Z.c or
+ * similar kernel-resident tables; or, configuration data coming from
+ * a bootloader.
+ *
+ * Other than binding model, current differences from "eeprom" driver are
+ * that this one handles write access and isn't restricted to 24c02 devices.
+ * It also handles larger devices (32 kbit and up) with two-byte addresses,
+ * which won't work on pure SMBus systems.
+ */
+
+struct at24_data {
+ struct at24_platform_data chip;
+ struct memory_accessor macc;
+ int use_smbus;
+
+ /*
+ * Lock protects against activities from other Linux tasks,
+ * but not from changes by other I2C masters.
+ */
+ struct mutex lock;
+ struct bin_attribute bin;
+
+ u8 *writebuf;
+ unsigned write_max;
+ unsigned num_addresses;
+
+ /*
+ * Some chips tie up multiple I2C addresses; dummy devices reserve
+ * them for us, and we'll use them with SMBus calls.
+ */
+ struct i2c_client *client[];
+};
+
+/*
+ * This parameter is to help this driver avoid blocking other drivers out
+ * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C
+ * clock, one 256 byte read takes about 1/43 second which is excessive;
+ * but the 1/170 second it takes at 400 kHz may be quite reasonable; and
+ * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible.
+ *
+ * This value is forced to be a power of two so that writes align on pages.
+ */
+static unsigned io_limit = 128;
+module_param(io_limit, uint, 0);
+MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)");
+
+/*
+ * Specs often allow 5 msec for a page write, sometimes 20 msec;
+ * it's important to recover from write timeouts.
+ */
+static unsigned write_timeout = 25;
+module_param(write_timeout, uint, 0);
+MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");
+
+#define AT24_SIZE_BYTELEN 5
+#define AT24_SIZE_FLAGS 8
+
+#define AT24_BITMASK(x) (BIT(x) - 1)
+
+/* create non-zero magic value for given eeprom parameters */
+#define AT24_DEVICE_MAGIC(_len, _flags) \
+ ((1 << AT24_SIZE_FLAGS | (_flags)) \
+ << AT24_SIZE_BYTELEN | ilog2(_len))
+
+static const struct i2c_device_id at24_ids[] = {
+ /* needs 8 addresses as A0-A2 are ignored */
+ { "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
+ /* old variants can't be handled with this generic entry! */
+ { "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) },
+ { "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) },
+ /* spd is a 24c02 in memory DIMMs */
+ { "spd", AT24_DEVICE_MAGIC(2048 / 8,
+ AT24_FLAG_READONLY | AT24_FLAG_IRUGO) },
+ { "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) },
+ /* 24rf08 quirk is handled at i2c-core */
+ { "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) },
+ { "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) },
+ { "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) },
+ { "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) },
+ { "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) },
+ { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
+ { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
+ { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
+ { "at24", 0 },
+ { /* END OF LIST */ }
+};
+MODULE_DEVICE_TABLE(i2c, at24_ids);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This routine supports chips which consume multiple I2C addresses. It
+ * computes the addressing information to be used for a given r/w request.
+ * Assumes that sanity checks for offset happened at the sysfs layer.
+ */
+static struct i2c_client *at24_translate_offset(struct at24_data *at24,
+ unsigned *offset)
+{
+ unsigned i;
+
+ if (at24->chip.flags & AT24_FLAG_ADDR16) {
+ i = *offset >> 16;
+ *offset &= 0xffff;
+ } else {
+ i = *offset >> 8;
+ *offset &= 0xff;
+ }
+
+ return at24->client[i];
+}
+
+static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
+ unsigned offset, size_t count)
+{
+ struct i2c_msg msg[2];
+ u8 msgbuf[2];
+ struct i2c_client *client;
+ unsigned long timeout, read_time;
+ int status, i;
+
+ memset(msg, 0, sizeof(msg));
+
+ /*
+ * REVISIT some multi-address chips don't rollover page reads to
+ * the next slave address, so we may need to truncate the count.
+ * Those chips might need another quirk flag.
+ *
+ * If the real hardware used four adjacent 24c02 chips and that
+ * were misconfigured as one 24c08, that would be a similar effect:
+ * one "eeprom" file not four, but larger reads would fail when
+ * they crossed certain pages.
+ */
+
+ /*
+ * Slave address and byte offset derive from the offset. Always
+ * set the byte address; on a multi-master board, another master
+ * may have changed the chip's "current" address pointer.
+ */
+ client = at24_translate_offset(at24, &offset);
+
+ if (count > io_limit)
+ count = io_limit;
+
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ /* Smaller eeproms can work given some SMBus extension calls */
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ count = 2;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ count = 1;
+ break;
+ default:
+ /*
+ * When we have a better choice than SMBus calls, use a
+ * combined I2C message. Write address; then read up to
+ * io_limit data bytes. Note that read page rollover helps us
+ * here (unlike writes). msgbuf is u8 and will cast to our
+ * needs.
+ */
+ i = 0;
+ if (at24->chip.flags & AT24_FLAG_ADDR16)
+ msgbuf[i++] = offset >> 8;
+ msgbuf[i++] = offset;
+
+ msg[0].addr = client->addr;
+ msg[0].buf = msgbuf;
+ msg[0].len = i;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = buf;
+ msg[1].len = count;
+ }
+
+ /*
+ * Reads fail if the previous write didn't complete yet. We may
+ * loop a few times until this one succeeds, waiting at least
+ * long enough for one entire page write to work.
+ */
+ timeout = jiffies + msecs_to_jiffies(write_timeout);
+ do {
+ read_time = jiffies;
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ status = i2c_smbus_read_i2c_block_data(client, offset,
+ count, buf);
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ status = i2c_smbus_read_word_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status & 0xff;
+ buf[1] = status >> 8;
+ status = count;
+ }
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ status = i2c_smbus_read_byte_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status;
+ status = count;
+ }
+ break;
+ default:
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ status = count;
+ }
+ dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
+ count, offset, status, jiffies);
+
+ if (status == count)
+ return count;
+
+ /* REVISIT: at HZ=100, this is sloooow */
+ msleep(1);
+ } while (time_before(read_time, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t at24_read(struct at24_data *at24,
+ char *buf, loff_t off, size_t count)
+{
+ ssize_t retval = 0;
+
+ if (unlikely(!count))
+ return count;
+
+ /*
+ * Read data from chip, protecting against concurrent updates
+ * from this host, but not from other I2C masters.
+ */
+ mutex_lock(&at24->lock);
+
+ while (count) {
+ ssize_t status;
+
+ status = at24_eeprom_read(at24, buf, off, count);
+ if (status <= 0) {
+ if (retval == 0)
+ retval = status;
+ break;
+ }
+ buf += status;
+ off += status;
+ count -= status;
+ retval += status;
+ }
+
+ mutex_unlock(&at24->lock);
+
+ return retval;
+}
+
+static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct at24_data *at24;
+
+ at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ return at24_read(at24, buf, off, count);
+}
+
+
+/*
+ * Note that if the hardware write-protect pin is pulled high, the whole
+ * chip is normally write protected. But there are plenty of product
+ * variants here, including OTP fuses and partial chip protect.
+ *
+ * We only use page mode writes; the alternative is sloooow. This routine
+ * writes at most one page.
+ */
+static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
+ unsigned offset, size_t count)
+{
+ struct i2c_client *client;
+ struct i2c_msg msg;
+ ssize_t status;
+ unsigned long timeout, write_time;
+ unsigned next_page;
+
+ /* Get corresponding I2C address and adjust offset */
+ client = at24_translate_offset(at24, &offset);
+
+ /* write_max is at most a page */
+ if (count > at24->write_max)
+ count = at24->write_max;
+
+ /* Never roll over backwards, to the start of this page */
+ next_page = roundup(offset + 1, at24->chip.page_size);
+ if (offset + count > next_page)
+ count = next_page - offset;
+
+ /* If we'll use I2C calls for I/O, set up the message */
+ if (!at24->use_smbus) {
+ int i = 0;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+
+ /* msg.buf is u8 and casts will mask the values */
+ msg.buf = at24->writebuf;
+ if (at24->chip.flags & AT24_FLAG_ADDR16)
+ msg.buf[i++] = offset >> 8;
+
+ msg.buf[i++] = offset;
+ memcpy(&msg.buf[i], buf, count);
+ msg.len = i + count;
+ }
+
+ /*
+ * Writes fail if the previous one didn't complete yet. We may
+ * loop a few times until this one succeeds, waiting at least
+ * long enough for one entire page write to work.
+ */
+ timeout = jiffies + msecs_to_jiffies(write_timeout);
+ do {
+ write_time = jiffies;
+ if (at24->use_smbus) {
+ status = i2c_smbus_write_i2c_block_data(client,
+ offset, count, buf);
+ if (status == 0)
+ status = count;
+ } else {
+ status = i2c_transfer(client->adapter, &msg, 1);
+ if (status == 1)
+ status = count;
+ }
+ dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
+ count, offset, status, jiffies);
+
+ if (status == count)
+ return count;
+
+ /* REVISIT: at HZ=100, this is sloooow */
+ msleep(1);
+ } while (time_before(write_time, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
+ size_t count)
+{
+ ssize_t retval = 0;
+
+ if (unlikely(!count))
+ return count;
+
+ /*
+ * Write data to chip, protecting against concurrent updates
+ * from this host, but not from other I2C masters.
+ */
+ mutex_lock(&at24->lock);
+
+ while (count) {
+ ssize_t status;
+
+ status = at24_eeprom_write(at24, buf, off, count);
+ if (status <= 0) {
+ if (retval == 0)
+ retval = status;
+ break;
+ }
+ buf += status;
+ off += status;
+ count -= status;
+ retval += status;
+ }
+
+ mutex_unlock(&at24->lock);
+
+ return retval;
+}
+
+static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct at24_data *at24;
+
+ at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ return at24_write(at24, buf, off, count);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This lets other kernel code access the eeprom data. For example, it
+ * might hold a board's Ethernet address, or board-specific calibration
+ * data generated on the manufacturing floor.
+ */
+
+static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
+ off_t offset, size_t count)
+{
+ struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+
+ return at24_read(at24, buf, offset, count);
+}
+
+static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
+ off_t offset, size_t count)
+{
+ struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+
+ return at24_write(at24, buf, offset, count);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_OF
+static void at24_get_ofdata(struct i2c_client *client,
+ struct at24_platform_data *chip)
+{
+ const __be32 *val;
+ struct device_node *node = client->dev.of_node;
+
+ if (node) {
+ if (of_get_property(node, "read-only", NULL))
+ chip->flags |= AT24_FLAG_READONLY;
+ val = of_get_property(node, "pagesize", NULL);
+ if (val)
+ chip->page_size = be32_to_cpup(val);
+ }
+}
+#else
+static void at24_get_ofdata(struct i2c_client *client,
+ struct at24_platform_data *chip)
+{ }
+#endif /* CONFIG_OF */
+
+static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct at24_platform_data chip;
+ bool writable;
+ int use_smbus = 0;
+ struct at24_data *at24;
+ int err;
+ unsigned i, num_addresses;
+ kernel_ulong_t magic;
+
+ if (client->dev.platform_data) {
+ chip = *(struct at24_platform_data *)client->dev.platform_data;
+ } else {
+ if (!id->driver_data) {
+ err = -ENODEV;
+ goto err_out;
+ }
+ magic = id->driver_data;
+ chip.byte_len = BIT(magic & AT24_BITMASK(AT24_SIZE_BYTELEN));
+ magic >>= AT24_SIZE_BYTELEN;
+ chip.flags = magic & AT24_BITMASK(AT24_SIZE_FLAGS);
+ /*
+ * This is slow, but we can't know all eeproms, so we'd better
+ * play it safe. Specifying custom eeprom-types via platform_data
+ * is recommended anyhow.
+ */
+ chip.page_size = 1;
+
+ /* update chipdata if OF is present */
+ at24_get_ofdata(client, &chip);
+
+ chip.setup = NULL;
+ chip.context = NULL;
+ }
+
+ if (!is_power_of_2(chip.byte_len))
+ dev_warn(&client->dev,
+ "byte_len looks suspicious (no power of 2)!\n");
+ if (!chip.page_size) {
+ dev_err(&client->dev, "page_size must not be 0!\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+ if (!is_power_of_2(chip.page_size))
+ dev_warn(&client->dev,
+ "page_size looks suspicious (no power of 2)!\n");
+
+ /* Use I2C operations unless we're stuck with SMBus extensions. */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ if (chip.flags & AT24_FLAG_ADDR16) {
+ err = -EPFNOSUPPORT;
+ goto err_out;
+ }
+ if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ use_smbus = I2C_SMBUS_WORD_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ use_smbus = I2C_SMBUS_BYTE_DATA;
+ } else {
+ err = -EPFNOSUPPORT;
+ goto err_out;
+ }
+ }
+
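+ /*
+ * Figure out how many I2C addresses the chip spans: TAKE8ADDR parts
+ * respond on all eight 0x5x addresses; otherwise it is one address
+ * per 256 byte (8-bit) or 64 KiB (16-bit) window.
+ */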
+ if (chip.flags & AT24_FLAG_TAKE8ADDR)
+ num_addresses = 8;
+ else
+ num_addresses = DIV_ROUND_UP(chip.byte_len,
+ (chip.flags & AT24_FLAG_ADDR16) ? 65536 : 256);
+
+ at24 = kzalloc(sizeof(struct at24_data) +
+ num_addresses * sizeof(struct i2c_client *), GFP_KERNEL);
+ if (!at24) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ mutex_init(&at24->lock);
+ at24->use_smbus = use_smbus;
+ at24->chip = chip;
+ at24->num_addresses = num_addresses;
+
+ /*
+ * Export the EEPROM bytes through sysfs, since that's convenient.
+ * By default, only root should see the data (maybe passwords etc)
+ */
+ sysfs_bin_attr_init(&at24->bin);
+ at24->bin.attr.name = "eeprom";
+ at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
+ at24->bin.read = at24_bin_read;
+ at24->bin.size = chip.byte_len;
+
+ at24->macc.read = at24_macc_read;
+
+ writable = !(chip.flags & AT24_FLAG_READONLY);
+ if (writable) {
+ if (!use_smbus || i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+
+ unsigned write_max = chip.page_size;
+
+ at24->macc.write = at24_macc_write;
+
+ at24->bin.write = at24_bin_write;
+ at24->bin.attr.mode |= S_IWUSR;
+
+ if (write_max > io_limit)
+ write_max = io_limit;
+ if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
+ write_max = I2C_SMBUS_BLOCK_MAX;
+ at24->write_max = write_max;
+
+ /* buffer (data + address at the beginning) */
+ at24->writebuf = kmalloc(write_max + 2, GFP_KERNEL);
+ if (!at24->writebuf) {
+ err = -ENOMEM;
+ goto err_struct;
+ }
+ } else {
+ dev_warn(&client->dev,
+ "cannot write due to controller restrictions.");
+ }
+ }
+
+ at24->client[0] = client;
+
+ /* use dummy devices for multiple-address chips */
+ for (i = 1; i < num_addresses; i++) {
+ at24->client[i] = i2c_new_dummy(client->adapter,
+ client->addr + i);
+ if (!at24->client[i]) {
+ dev_err(&client->dev, "address 0x%02x unavailable\n",
+ client->addr + i);
+ err = -EADDRINUSE;
+ goto err_clients;
+ }
+ }
+
+ err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
+ if (err)
+ goto err_clients;
+
+ i2c_set_clientdata(client, at24);
+
+ dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
+ at24->bin.size, client->name,
+ writable ? "writable" : "read-only", at24->write_max);
+ if (use_smbus == I2C_SMBUS_WORD_DATA ||
+ use_smbus == I2C_SMBUS_BYTE_DATA) {
+ dev_notice(&client->dev, "Falling back to %s reads, "
+ "performance will suffer\n", use_smbus ==
+ I2C_SMBUS_WORD_DATA ? "word" : "byte");
+ }
+
+ /* export data to kernel code */
+ if (chip.setup)
+ chip.setup(&at24->macc, chip.context);
+
+ return 0;
+
+err_clients:
+ for (i = 1; i < num_addresses; i++)
+ if (at24->client[i])
+ i2c_unregister_device(at24->client[i]);
+
+ kfree(at24->writebuf);
+err_struct:
+ kfree(at24);
+err_out:
+ dev_dbg(&client->dev, "probe error %d\n", err);
+ return err;
+}
+
+static int __devexit at24_remove(struct i2c_client *client)
+{
+ struct at24_data *at24;
+ int i;
+
+ at24 = i2c_get_clientdata(client);
+ sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);
+
+ for (i = 1; i < at24->num_addresses; i++)
+ i2c_unregister_device(at24->client[i]);
+
+ kfree(at24->writebuf);
+ kfree(at24);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct i2c_driver at24_driver = {
+ .driver = {
+ .name = "at24",
+ .owner = THIS_MODULE,
+ },
+ .probe = at24_probe,
+ .remove = __devexit_p(at24_remove),
+ .id_table = at24_ids,
+};
+
+static int __init at24_init(void)
+{
+ if (!io_limit) {
+ pr_err("at24: io_limit must not be 0!\n");
+ return -EINVAL;
+ }
+
+ io_limit = rounddown_pow_of_two(io_limit);
+ return i2c_add_driver(&at24_driver);
+}
+module_init(at24_init);
+
+static void __exit at24_exit(void)
+{
+ i2c_del_driver(&at24_driver);
+}
+module_exit(at24_exit);
+
+MODULE_DESCRIPTION("Driver for most I2C EEPROMs");
+MODULE_AUTHOR("David Brownell and Wolfram Sang");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
new file mode 100644
index 00000000..01ab3c9b
--- /dev/null
+++ b/drivers/misc/eeprom/at25.c
@@ -0,0 +1,413 @@
+/*
+ * at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
+ *
+ * Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/eeprom.h>
+
+
+/*
+ * NOTE: this is an *EEPROM* driver. The vagaries of product naming
+ * mean that some AT25 products are EEPROMs, and others are FLASH.
+ * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
+ * not this one!
+ */
+
+struct at25_data {
+ struct spi_device *spi;
+ struct memory_accessor mem;
+ struct mutex lock;
+ struct spi_eeprom chip;
+ struct bin_attribute bin;
+ unsigned addrlen;
+};
+
+#define AT25_WREN 0x06 /* latch the write enable */
+#define AT25_WRDI 0x04 /* reset the write enable */
+#define AT25_RDSR 0x05 /* read status register */
+#define AT25_WRSR 0x01 /* write status register */
+#define AT25_READ 0x03 /* read byte(s) */
+#define AT25_WRITE 0x02 /* write byte(s)/sector */
+
+#define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */
+#define AT25_SR_WEN 0x02 /* write enable (latched) */
+#define AT25_SR_BP0 0x04 /* BP for software writeprotect */
+#define AT25_SR_BP1 0x08
+#define AT25_SR_WPEN 0x80 /* writeprotect enable */
+
+
+#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
+
+/* Specs often allow 5 msec for a page write, sometimes 20 msec;
+ * it's important to recover from write timeouts.
+ */
+#define EE_TIMEOUT 25
+
+/*-------------------------------------------------------------------------*/
+
+#define io_limit PAGE_SIZE /* bytes */
+
+static ssize_t
+at25_ee_read(struct at25_data *at25, char *buf, unsigned offset,
+	     size_t count)
+{
+ u8 command[EE_MAXADDRLEN + 1];
+ u8 *cp;
+ ssize_t status;
+ struct spi_transfer t[2];
+ struct spi_message m;
+
+ if (unlikely(offset >= at25->bin.size))
+ return 0;
+ if ((offset + count) > at25->bin.size)
+ count = at25->bin.size - offset;
+ if (unlikely(!count))
+ return count;
+
+ cp = command;
+ *cp++ = AT25_READ;
+
+ /* 8/16/24-bit address is written MSB first */
+ switch (at25->addrlen) {
+ default: /* case 3 */
+ *cp++ = offset >> 16;
+ case 2:
+ *cp++ = offset >> 8;
+ case 1:
+ case 0: /* can't happen: for better codegen */
+ *cp++ = offset >> 0;
+ }
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof t);
+
+ t[0].tx_buf = command;
+ t[0].len = at25->addrlen + 1;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&at25->lock);
+
+ /* Read it all at once.
+ *
+ * REVISIT that's potentially a problem with large chips, if
+ * other devices on the bus need to be accessed regularly or
+ * this chip is clocked very slowly
+ */
+ status = spi_sync(at25->spi, &m);
+ dev_dbg(&at25->spi->dev,
+ "read %Zd bytes at %d --> %d\n",
+ count, offset, (int) status);
+
+ mutex_unlock(&at25->lock);
+ return status ? status : count;
+}
+
+static ssize_t
+at25_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct at25_data *at25;
+
+ dev = container_of(kobj, struct device, kobj);
+ at25 = dev_get_drvdata(dev);
+
+ return at25_ee_read(at25, buf, off, count);
+}
+
+
+static ssize_t
+at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
+ size_t count)
+{
+ ssize_t status = 0;
+ unsigned written = 0;
+ unsigned buf_size;
+ u8 *bounce;
+
+ if (unlikely(off >= at25->bin.size))
+ return -EFBIG;
+ if ((off + count) > at25->bin.size)
+ count = at25->bin.size - off;
+ if (unlikely(!count))
+ return count;
+
+ /* Temp buffer starts with command and address */
+ buf_size = at25->chip.page_size;
+ if (buf_size > io_limit)
+ buf_size = io_limit;
+ bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
+
+ /* For write, rollover is within the page ... so we write at
+ * most one page, then manually roll over to the next page.
+ */
+ bounce[0] = AT25_WRITE;
+ mutex_lock(&at25->lock);
+ do {
+ unsigned long timeout, retries;
+ unsigned segment;
+ unsigned offset = (unsigned) off;
+ u8 *cp = bounce + 1;
+ int sr;
+
+ *cp = AT25_WREN;
+ status = spi_write(at25->spi, cp, 1);
+ if (status < 0) {
+ dev_dbg(&at25->spi->dev, "WREN --> %d\n",
+ (int) status);
+ break;
+ }
+
+ /* 8/16/24-bit address is written MSB first */
+ switch (at25->addrlen) {
+ default: /* case 3 */
+ *cp++ = offset >> 16;
+ case 2:
+ *cp++ = offset >> 8;
+ case 1:
+ case 0: /* can't happen: for better codegen */
+ *cp++ = offset >> 0;
+ }
+
+ /* Write as much of a page as we can */
+ segment = buf_size - (offset % buf_size);
+ if (segment > count)
+ segment = count;
+ memcpy(cp, buf, segment);
+ status = spi_write(at25->spi, bounce,
+ segment + at25->addrlen + 1);
+ dev_dbg(&at25->spi->dev,
+ "write %u bytes at %u --> %d\n",
+ segment, offset, (int) status);
+ if (status < 0)
+ break;
+
+ /* REVISIT this should detect (or prevent) failed writes
+ * to readonly sections of the EEPROM...
+ */
+
+ /* Wait for non-busy status */
+ timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
+ retries = 0;
+ do {
+
+ sr = spi_w8r8(at25->spi, AT25_RDSR);
+ if (sr < 0 || (sr & AT25_SR_nRDY)) {
+ dev_dbg(&at25->spi->dev,
+ "rdsr --> %d (%02x)\n", sr, sr);
+ /* at HZ=100, this is sloooow */
+ msleep(1);
+ continue;
+ }
+ if (!(sr & AT25_SR_nRDY))
+ break;
+ } while (retries++ < 3 || time_before_eq(jiffies, timeout));
+
+ if ((sr < 0) || (sr & AT25_SR_nRDY)) {
+ dev_err(&at25->spi->dev,
+ "write %d bytes offset %d, "
+ "timeout after %u msecs\n",
+ segment, offset,
+ jiffies_to_msecs(jiffies -
+ (timeout - EE_TIMEOUT)));
+ status = -ETIMEDOUT;
+ break;
+ }
+
+ off += segment;
+ buf += segment;
+ count -= segment;
+ written += segment;
+
+ } while (count > 0);
+
+ mutex_unlock(&at25->lock);
+
+ kfree(bounce);
+ return written ? written : status;
+}
+
+static ssize_t
+at25_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct at25_data *at25;
+
+ dev = container_of(kobj, struct device, kobj);
+ at25 = dev_get_drvdata(dev);
+
+ return at25_ee_write(at25, buf, off, count);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Let in-kernel code access the eeprom data. */
+
+static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
+ off_t offset, size_t count)
+{
+ struct at25_data *at25 = container_of(mem, struct at25_data, mem);
+
+ return at25_ee_read(at25, buf, offset, count);
+}
+
+static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
+ off_t offset, size_t count)
+{
+ struct at25_data *at25 = container_of(mem, struct at25_data, mem);
+
+ return at25_ee_write(at25, buf, offset, count);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int at25_probe(struct spi_device *spi)
+{
+ struct at25_data *at25 = NULL;
+ const struct spi_eeprom *chip;
+ int err;
+ int sr;
+ int addrlen;
+
+ /* Chip description */
+ chip = spi->dev.platform_data;
+ if (!chip) {
+ dev_dbg(&spi->dev, "no chip description\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ /* For now we only support 8/16/24 bit addressing */
+ if (chip->flags & EE_ADDR1)
+ addrlen = 1;
+ else if (chip->flags & EE_ADDR2)
+ addrlen = 2;
+ else if (chip->flags & EE_ADDR3)
+ addrlen = 3;
+ else {
+ dev_dbg(&spi->dev, "unsupported address type\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* Ping the chip ... the status register is pretty portable,
+ * unlike probing manufacturer IDs. We do expect that system
+ * firmware didn't write it in the past few milliseconds!
+ */
+ sr = spi_w8r8(spi, AT25_RDSR);
+ if (sr < 0 || sr & AT25_SR_nRDY) {
+ dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr);
+ err = -ENXIO;
+ goto fail;
+ }
+
+ at25 = kzalloc(sizeof(*at25), GFP_KERNEL);
+ if (!at25) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ mutex_init(&at25->lock);
+ at25->chip = *chip;
+ at25->spi = spi_dev_get(spi);
+ dev_set_drvdata(&spi->dev, at25);
+ at25->addrlen = addrlen;
+
+ /* Export the EEPROM bytes through sysfs, since that's convenient.
+ * And maybe to other kernel code; it might hold a board's Ethernet
+ * address, or board-specific calibration data generated on the
+ * manufacturing floor.
+ *
+ * Default to root-only access to the data; EEPROMs often hold data
+ * that's sensitive for read and/or write, like ethernet addresses,
+ * security codes, board-specific manufacturing calibrations, etc.
+ */
+ sysfs_bin_attr_init(&at25->bin);
+ at25->bin.attr.name = "eeprom";
+ at25->bin.attr.mode = S_IRUSR;
+ at25->bin.read = at25_bin_read;
+ at25->mem.read = at25_mem_read;
+
+ at25->bin.size = at25->chip.byte_len;
+ if (!(chip->flags & EE_READONLY)) {
+ at25->bin.write = at25_bin_write;
+ at25->bin.attr.mode |= S_IWUSR;
+ at25->mem.write = at25_mem_write;
+ }
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
+ if (err)
+ goto fail;
+
+ if (chip->setup)
+ chip->setup(&at25->mem, chip->context);
+
+ dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
+ (at25->bin.size < 1024)
+ ? at25->bin.size
+ : (at25->bin.size / 1024),
+ (at25->bin.size < 1024) ? "Byte" : "KByte",
+ at25->chip.name,
+ (chip->flags & EE_READONLY) ? " (readonly)" : "",
+ at25->chip.page_size);
+ return 0;
+fail:
+ dev_dbg(&spi->dev, "probe err %d\n", err);
+ kfree(at25);
+ return err;
+}
+
+static int __devexit at25_remove(struct spi_device *spi)
+{
+ struct at25_data *at25;
+
+ at25 = dev_get_drvdata(&spi->dev);
+ sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
+ kfree(at25);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct spi_driver at25_driver = {
+ .driver = {
+ .name = "at25",
+ .owner = THIS_MODULE,
+ },
+ .probe = at25_probe,
+ .remove = __devexit_p(at25_remove),
+};
+
+module_spi_driver(at25_driver);
+
+MODULE_DESCRIPTION("Driver for most SPI EEPROMs");
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:at25");
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
new file mode 100644
index 00000000..66d9e1ba
--- /dev/null
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -0,0 +1,85 @@
+/*
+ * EEPROMs access control driver for display configuration EEPROMs
+ * on DigsyMTC board.
+ *
+ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/eeprom_93xx46.h>
+
+#define GPIO_EEPROM_CLK 216
+#define GPIO_EEPROM_CS 210
+#define GPIO_EEPROM_DI 217
+#define GPIO_EEPROM_DO 249
+#define GPIO_EEPROM_OE 255
+#define EE_SPI_BUS_NUM 1
+
+static void digsy_mtc_op_prepare(void *p)
+{
+ /* enable */
+ gpio_set_value(GPIO_EEPROM_OE, 0);
+}
+
+static void digsy_mtc_op_finish(void *p)
+{
+ /* disable */
+ gpio_set_value(GPIO_EEPROM_OE, 1);
+}
+
+struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
+ .flags = EE_ADDR8,
+ .prepare = digsy_mtc_op_prepare,
+ .finish = digsy_mtc_op_finish,
+};
+
+static struct spi_gpio_platform_data eeprom_spi_gpio_data = {
+ .sck = GPIO_EEPROM_CLK,
+ .mosi = GPIO_EEPROM_DI,
+ .miso = GPIO_EEPROM_DO,
+ .num_chipselect = 1,
+};
+
+static struct platform_device digsy_mtc_eeprom = {
+ .name = "spi_gpio",
+ .id = EE_SPI_BUS_NUM,
+ .dev = {
+ .platform_data = &eeprom_spi_gpio_data,
+ },
+};
+
+static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = {
+ {
+ .modalias = "93xx46",
+ .max_speed_hz = 1000000,
+ .bus_num = EE_SPI_BUS_NUM,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ .controller_data = (void *)GPIO_EEPROM_CS,
+ .platform_data = &digsy_mtc_eeprom_data,
+ },
+};
+
+static int __init digsy_mtc_eeprom_devices_init(void)
+{
+ int ret;
+
+ ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH,
+ "93xx46 EEPROMs OE");
+ if (ret) {
+ pr_err("can't request gpio %d\n", GPIO_EEPROM_OE);
+ return ret;
+ }
+ spi_register_board_info(digsy_mtc_eeprom_info,
+ ARRAY_SIZE(digsy_mtc_eeprom_info));
+ return platform_device_register(&digsy_mtc_eeprom);
+}
+device_initcall(digsy_mtc_eeprom_devices_init);
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
new file mode 100644
index 00000000..c169e076
--- /dev/null
+++ b/drivers/misc/eeprom/eeprom.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
+ * Philip Edelbrock <phil@netroedge.com>
+ * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2003 IBM Corp.
+ * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
+ 0x55, 0x56, 0x57, I2C_CLIENT_END };
+
+
+/* Size of EEPROM in bytes */
+#define EEPROM_SIZE 256
+
+/* possible types of eeprom devices */
+enum eeprom_nature {
+ UNKNOWN,
+ VAIO,
+};
+
+/* Each client has this additional data */
+struct eeprom_data {
+ struct mutex update_lock;
+ u8 valid; /* bitfield, bit!=0 if slice is valid */
+ unsigned long last_updated[8]; /* In jiffies, 8 slices */
+ u8 data[EEPROM_SIZE]; /* Register values */
+ enum eeprom_nature nature;
+};
+
+
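+/*
+ * Refresh one 32-byte slice of the cached EEPROM image. A slice is only
+ * re-read from the chip when it has never been read or when its cached
+ * copy is more than 300 seconds old.
+ */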
+static void eeprom_update_client(struct i2c_client *client, u8 slice)
+{
+ struct eeprom_data *data = i2c_get_clientdata(client);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (!(data->valid & (1 << slice)) ||
+ time_after(jiffies, data->last_updated[slice] + 300 * HZ)) {
+ dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice);
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ for (i = slice << 5; i < (slice + 1) << 5; i += 32)
+ if (i2c_smbus_read_i2c_block_data(client, i,
+ 32, data->data + i)
+ != 32)
+ goto exit;
+ } else {
+ for (i = slice << 5; i < (slice + 1) << 5; i += 2) {
+ int word = i2c_smbus_read_word_data(client, i);
+ if (word < 0)
+ goto exit;
+ data->data[i] = word & 0xff;
+ data->data[i + 1] = word >> 8;
+ }
+ }
+ data->last_updated[slice] = jiffies;
+ data->valid |= (1 << slice);
+ }
+exit:
+ mutex_unlock(&data->update_lock);
+}
+
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
+ struct eeprom_data *data = i2c_get_clientdata(client);
+ u8 slice;
+
+ if (off > EEPROM_SIZE)
+ return 0;
+ if (off + count > EEPROM_SIZE)
+ count = EEPROM_SIZE - off;
+
+ /* Only refresh slices which contain requested bytes */
+ for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
+ eeprom_update_client(client, slice);
+
+ /* Hide Vaio private settings to regular users:
+ - BIOS passwords: bytes 0x00 to 0x0f
+ - UUID: bytes 0x10 to 0x1f
+ - Serial number: 0xc0 to 0xdf */
+ if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((off + i <= 0x1f) ||
+ (off + i >= 0xc0 && off + i <= 0xdf))
+ buf[i] = 0;
+ else
+ buf[i] = data->data[off + i];
+ }
+ } else {
+ memcpy(buf, &data->data[off], count);
+ }
+
+ return count;
+}
+
+static struct bin_attribute eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = S_IRUGO,
+ },
+ .size = EEPROM_SIZE,
+ .read = eeprom_read,
+};
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+
+ /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all
+ addresses 0x50-0x57, but we only care about 0x50. So decline
+ attaching to addresses >= 0x51 on DDC buses */
+ if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51)
+ return -ENODEV;
+
+ /* There are four ways we can read the EEPROM data:
+ (1) I2C block reads (faster, but unsupported by most adapters)
+ (2) Word reads (128% overhead)
+ (3) Consecutive byte reads (88% overhead, unsafe)
+ (4) Regular byte data reads (265% overhead)
+ The third and fourth methods are not implemented by this driver
+ because all known adapters support one of the first two. */
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)
+ && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ return -ENODEV;
+
+ strlcpy(info->type, "eeprom", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int eeprom_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct eeprom_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct eeprom_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memset(data->data, 0xff, EEPROM_SIZE);
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+ data->nature = UNKNOWN;
+
+ /* Detect the Vaio nature of EEPROMs.
+ We use the "PCG-" or "VGN-" prefix as the signature. */
+ if (client->addr == 0x57
+ && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ char name[4];
+
+ name[0] = i2c_smbus_read_byte_data(client, 0x80);
+ name[1] = i2c_smbus_read_byte_data(client, 0x81);
+ name[2] = i2c_smbus_read_byte_data(client, 0x82);
+ name[3] = i2c_smbus_read_byte_data(client, 0x83);
+
+ if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) {
+ dev_info(&client->dev, "Vaio EEPROM detected, "
+ "enabling privacy protection\n");
+ data->nature = VAIO;
+ }
+ }
+
+ /* create the sysfs eeprom file */
+ err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
+ if (err)
+ goto exit_kfree;
+
+ return 0;
+
+exit_kfree:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int eeprom_remove(struct i2c_client *client)
+{
+ sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
+ kfree(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static const struct i2c_device_id eeprom_id[] = {
+ { "eeprom", 0 },
+ { }
+};
+
+static struct i2c_driver eeprom_driver = {
+ .driver = {
+ .name = "eeprom",
+ },
+ .probe = eeprom_probe,
+ .remove = eeprom_remove,
+ .id_table = eeprom_id,
+
+ .class = I2C_CLASS_DDC | I2C_CLASS_SPD,
+ .detect = eeprom_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(eeprom_driver);
+
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
+ "Philip Edelbrock <phil@netroedge.com> and "
+ "Greg Kroah-Hartman <greg@kroah.com>");
+MODULE_DESCRIPTION("I2C EEPROM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
new file mode 100644
index 00000000..0ff4b021
--- /dev/null
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Module: eeprom_93cx6
+ * Abstract: EEPROM reader routines for 93cx6 chipsets.
+ * Supported chipsets: 93c46 & 93c66.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/eeprom_93cx6.h>
+
+MODULE_AUTHOR("http://rt2x00.serialmonkey.com");
+MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("EEPROM 93cx6 chip driver");
+MODULE_LICENSE("GPL");
+
+static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom)
+{
+ eeprom->reg_data_clock = 1;
+ eeprom->register_write(eeprom);
+
+ /*
+ * Add a short delay for the pulse to work.
+ * According to the specifications the "maximum minimum"
+ * time should be 450ns.
+ */
+ ndelay(450);
+}
+
+static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom)
+{
+ eeprom->reg_data_clock = 0;
+ eeprom->register_write(eeprom);
+
+ /*
+ * Add a short delay for the pulse to work.
+ * According to the specifications the "maximum minimum"
+ * time should be 450ns.
+ */
+ ndelay(450);
+}
+
+static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
+{
+ /*
+ * Clear all flags, and enable chip select.
+ */
+ eeprom->register_read(eeprom);
+ eeprom->reg_data_in = 0;
+ eeprom->reg_data_out = 0;
+ eeprom->reg_data_clock = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->drive_data = 1;
+ eeprom->register_write(eeprom);
+
+ /*
+ * kick a pulse.
+ */
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+}
+
+static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom)
+{
+ /*
+ * Clear chip_select and data_in flags.
+ */
+ eeprom->register_read(eeprom);
+ eeprom->reg_data_in = 0;
+ eeprom->reg_chip_select = 0;
+ eeprom->register_write(eeprom);
+
+ /*
+ * kick a pulse.
+ */
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+}
+
+static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
+ const u16 data, const u16 count)
+{
+ unsigned int i;
+
+ eeprom->register_read(eeprom);
+
+ /*
+ * Clear data flags.
+ */
+ eeprom->reg_data_in = 0;
+ eeprom->reg_data_out = 0;
+ eeprom->drive_data = 1;
+
+ /*
+ * Start writing all bits.
+ */
+ for (i = count; i > 0; i--) {
+ /*
+ * Check if this bit needs to be set.
+ */
+ eeprom->reg_data_in = !!(data & (1 << (i - 1)));
+
+ /*
+ * Write the bit to the eeprom register.
+ */
+ eeprom->register_write(eeprom);
+
+ /*
+ * Kick a pulse.
+ */
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+ }
+
+ eeprom->reg_data_in = 0;
+ eeprom->register_write(eeprom);
+}
+
+static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
+ u16 *data, const u16 count)
+{
+ unsigned int i;
+ u16 buf = 0;
+
+ eeprom->register_read(eeprom);
+
+ /*
+ * Clear data flags.
+ */
+ eeprom->reg_data_in = 0;
+ eeprom->reg_data_out = 0;
+ eeprom->drive_data = 0;
+
+ /*
+ * Start reading all bits.
+ */
+ for (i = count; i > 0; i--) {
+ eeprom_93cx6_pulse_high(eeprom);
+
+ eeprom->register_read(eeprom);
+
+ /*
+ * Clear data_in flag.
+ */
+ eeprom->reg_data_in = 0;
+
+ /*
+ * Read if the bit has been set.
+ */
+ if (eeprom->reg_data_out)
+ buf |= (1 << (i - 1));
+
+ eeprom_93cx6_pulse_low(eeprom);
+ }
+
+ *data = buf;
+}
+
+/**
+ * eeprom_93cx6_read - Read a single word from eeprom
+ * @eeprom: Pointer to eeprom structure
+ * @word: Word index of the word that should be read
+ * @data: target pointer where the word will be stored
+ *
+ * This function will read one 16-bit word from the eeprom into the
+ * given data pointer, in host byte order.
+ */
+void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
+ u16 *data)
+{
+ u16 command;
+
+ /*
+ * Initialize the eeprom register
+ */
+ eeprom_93cx6_startup(eeprom);
+
+ /*
+ * Select the read opcode and the word to be read.
+ */
+ command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word;
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /*
+ * Read the requested 16 bits.
+ */
+ eeprom_93cx6_read_bits(eeprom, data, 16);
+
+ /*
+ * Cleanup eeprom register.
+ */
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_read);
+
+/**
+ * eeprom_93cx6_multiread - Read multiple words from eeprom
+ * @eeprom: Pointer to eeprom structure
+ * @word: Word index from where we should start reading
+ * @data: target pointer where the information will have to be stored
+ * @words: Number of words that should be read.
+ *
+ * This function will read all requested words from the eeprom by
+ * calling eeprom_93cx6_read() multiple times. Unlike eeprom_93cx6_read(),
+ * which returns host-ordered words, this function returns little
+ * endian words.
+ */
+void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
+ __le16 *data, const u16 words)
+{
+ unsigned int i;
+ u16 tmp;
+
+ for (i = 0; i < words; i++) {
+ tmp = 0;
+ eeprom_93cx6_read(eeprom, word + i, &tmp);
+ data[i] = cpu_to_le16(tmp);
+ }
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ /* create command to enable/disable */
+
+ command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+ command <<= (eeprom->width - 2);
+
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write @data to the specified @addr in the EEPROM and wait for the
+ * device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we sleep between parts of the operation to avoid burning excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+ int timeout = 100;
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+ command |= addr;
+
+ /* send write command */
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /* send data */
+ eeprom_93cx6_write_bits(eeprom, data, 16);
+
+ /* get ready to check for busy */
+ eeprom->drive_data = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->register_write(eeprom);
+
+ /* wait at least 250ns for DO to become the busy signal */
+ usleep_range(1000, 2000);
+
+ /* wait for DO to go high to signify finish */
+
+ while (true) {
+ eeprom->register_read(eeprom);
+
+ if (eeprom->reg_data_out)
+ break;
+
+ usleep_range(1000, 2000);
+
+ if (--timeout <= 0) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ break;
+ }
+ }
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
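
This file is a helper library rather than a bus driver: the consumer fills in
the register_read()/register_write() callbacks that move the bit-bang lines,
then calls the read/write helpers above. A hedged sketch of a consumer — the
control-register bit layout and the use of eeprom->data as an mmio cookie are
invented for illustration:

    #include <linux/io.h>
    #include <linux/eeprom_93cx6.h>

    /* Invented bit layout of a hypothetical bit-bang control register. */
    #define DEMO_EE_DIN     0x01
    #define DEMO_EE_DOUT    0x02
    #define DEMO_EE_CLK     0x04
    #define DEMO_EE_CS      0x08

    static void demo_eeprom_read(struct eeprom_93cx6 *eeprom)
    {
            u32 reg = readl((void __iomem *)eeprom->data);

            eeprom->reg_data_in = !!(reg & DEMO_EE_DIN);
            eeprom->reg_data_out = !!(reg & DEMO_EE_DOUT);
            eeprom->reg_data_clock = !!(reg & DEMO_EE_CLK);
            eeprom->reg_chip_select = !!(reg & DEMO_EE_CS);
    }

    static void demo_eeprom_write(struct eeprom_93cx6 *eeprom)
    {
            u32 reg = 0;

            if (eeprom->reg_data_in)
                    reg |= DEMO_EE_DIN;
            if (eeprom->reg_data_clock)
                    reg |= DEMO_EE_CLK;
            if (eeprom->reg_chip_select)
                    reg |= DEMO_EE_CS;

            writel(reg, (void __iomem *)eeprom->data);
    }

    /* e.g. read three little-endian words (a MAC address) at word 0 */
    static void demo_read_mac(void __iomem *ctrl, __le16 *mac)
    {
            struct eeprom_93cx6 eeprom = {
                    .data           = (__force void *)ctrl,
                    .width          = PCI_EEPROM_WIDTH_93C46,
                    .register_read  = demo_eeprom_read,
                    .register_write = demo_eeprom_write,
            };

            eeprom_93cx6_multiread(&eeprom, 0, mac, 3);
    }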
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
new file mode 100644
index 00000000..ce3fe363
--- /dev/null
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -0,0 +1,400 @@
+/*
+ * Driver for 93xx46 EEPROMs
+ *
+ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/eeprom_93xx46.h>
+
+#define OP_START 0x4
+#define OP_WRITE (OP_START | 0x1)
+#define OP_READ (OP_START | 0x2)
+#define ADDR_EWDS 0x00
+#define ADDR_ERAL 0x20
+#define ADDR_EWEN 0x30
+
+struct eeprom_93xx46_dev {
+ struct spi_device *spi;
+ struct eeprom_93xx46_platform_data *pdata;
+ struct bin_attribute bin;
+ struct mutex lock;
+ int addrlen;
+};
+
+static ssize_t
+eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct eeprom_93xx46_dev *edev;
+ struct device *dev;
+ struct spi_message m;
+ struct spi_transfer t[2];
+ int bits, ret;
+ u16 cmd_addr;
+
+ dev = container_of(kobj, struct device, kobj);
+ edev = dev_get_drvdata(dev);
+
+ if (unlikely(off >= edev->bin.size))
+ return 0;
+ if ((off + count) > edev->bin.size)
+ count = edev->bin.size - off;
+ if (unlikely(!count))
+ return count;
+
+ cmd_addr = OP_READ << edev->addrlen;
+
+ if (edev->addrlen == 7) {
+ cmd_addr |= off & 0x7f;
+ bits = 10;
+ } else {
+ cmd_addr |= off & 0x3f;
+ bits = 9;
+ }
+
+ dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+ cmd_addr, edev->spi->max_speed_hz);
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = (char *)&cmd_addr;
+ t[0].len = 2;
+ t[0].bits_per_word = bits;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ t[1].bits_per_word = 8;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ ret = spi_sync(edev->spi, &m);
+ /* have to wait at least Tcsl ns */
+ ndelay(250);
+ if (ret) {
+ dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+ count, (int)off, ret);
+ }
+
+ if (edev->pdata->finish)
+ edev->pdata->finish(edev);
+
+ mutex_unlock(&edev->lock);
+ return ret ? : count;
+}
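+
+/*
+ * Worked example of the command word built above: for an 8-bit part
+ * (addrlen == 7) reading offset 0x10, cmd_addr = OP_READ << 7 | 0x10 =
+ * 0x6 << 7 | 0x10 = 0x310, clocked out as a 10-bit transfer (start bit,
+ * two opcode bits, seven address bits).
+ */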
+
+static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
+{
+ struct spi_message m;
+ struct spi_transfer t;
+ int bits, ret;
+ u16 cmd_addr;
+
+ cmd_addr = OP_START << edev->addrlen;
+ if (edev->addrlen == 7) {
+ cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS) << 1;
+ bits = 10;
+ } else {
+ cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS);
+ bits = 9;
+ }
+
+ dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = &cmd_addr;
+ t.len = 2;
+ t.bits_per_word = bits;
+ spi_message_add_tail(&t, &m);
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ ret = spi_sync(edev->spi, &m);
+ /* have to wait at least Tcsl ns */
+ ndelay(250);
+ if (ret)
+ dev_err(&edev->spi->dev, "erase/write %sable error %d\n",
+ is_on ? "en" : "dis", ret);
+
+ if (edev->pdata->finish)
+ edev->pdata->finish(edev);
+
+ mutex_unlock(&edev->lock);
+ return ret;
+}
+
+static ssize_t
+eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
+ const char *buf, unsigned off)
+{
+ struct spi_message m;
+ struct spi_transfer t[2];
+ int bits, data_len, ret;
+ u16 cmd_addr;
+
+ cmd_addr = OP_WRITE << edev->addrlen;
+
+ if (edev->addrlen == 7) {
+ cmd_addr |= off & 0x7f;
+ bits = 10;
+ data_len = 1;
+ } else {
+ cmd_addr |= off & 0x3f;
+ bits = 9;
+ data_len = 2;
+ }
+
+ dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr);
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = (char *)&cmd_addr;
+ t[0].len = 2;
+ t[0].bits_per_word = bits;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = data_len;
+ t[1].bits_per_word = 8;
+ spi_message_add_tail(&t[1], &m);
+
+ ret = spi_sync(edev->spi, &m);
+ /* have to wait program cycle time Twc ms */
+ mdelay(6);
+ return ret;
+}
+
+static ssize_t
+eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct eeprom_93xx46_dev *edev;
+ struct device *dev;
+ int i, ret, step = 1;
+
+ dev = container_of(kobj, struct device, kobj);
+ edev = dev_get_drvdata(dev);
+
+ if (unlikely(off >= edev->bin.size))
+ return 0;
+ if ((off + count) > edev->bin.size)
+ count = edev->bin.size - off;
+ if (unlikely(!count))
+ return count;
+
+ /* only write an even number of bytes on 16-bit devices */
+ if (edev->addrlen == 6) {
+ step = 2;
+ count &= ~1;
+ }
+
+ /* erase/write enable */
+ ret = eeprom_93xx46_ew(edev, 1);
+ if (ret)
+ return ret;
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ for (i = 0; i < count; i += step) {
+ ret = eeprom_93xx46_write_word(edev, &buf[i], off + i);
+ if (ret) {
+ dev_err(&edev->spi->dev, "write failed at %d: %d\n",
+ (int)off + i, ret);
+ break;
+ }
+ }
+
+ if (edev->pdata->finish)
+ edev->pdata->finish(edev);
+
+ mutex_unlock(&edev->lock);
+
+ /* erase/write disable */
+ eeprom_93xx46_ew(edev, 0);
+ return ret ? : count;
+}
+
+static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
+{
+ struct eeprom_93xx46_platform_data *pd = edev->pdata;
+ struct spi_message m;
+ struct spi_transfer t;
+ int bits, ret;
+ u16 cmd_addr;
+
+ cmd_addr = OP_START << edev->addrlen;
+ if (edev->addrlen == 7) {
+ cmd_addr |= ADDR_ERAL << 1;
+ bits = 10;
+ } else {
+ cmd_addr |= ADDR_ERAL;
+ bits = 9;
+ }
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = &cmd_addr;
+ t.len = 2;
+ t.bits_per_word = bits;
+ spi_message_add_tail(&t, &m);
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ ret = spi_sync(edev->spi, &m);
+ if (ret)
+ dev_err(&edev->spi->dev, "erase error %d\n", ret);
+ /* have to wait erase cycle time Tec ms */
+ mdelay(6);
+
+ if (pd->finish)
+ pd->finish(edev);
+
+ mutex_unlock(&edev->lock);
+ return ret;
+}
+
+static ssize_t eeprom_93xx46_store_erase(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeprom_93xx46_dev *edev = dev_get_drvdata(dev);
+ int erase = 0, ret;
+
+ sscanf(buf, "%d", &erase);
+ if (erase) {
+ ret = eeprom_93xx46_ew(edev, 1);
+ if (ret)
+ return ret;
+ ret = eeprom_93xx46_eral(edev);
+ if (ret)
+ return ret;
+ ret = eeprom_93xx46_ew(edev, 0);
+ if (ret)
+ return ret;
+ }
+ return count;
+}
+static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
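+
+/*
+ * The erase attribute above exposes a one-shot full-chip erase to
+ * userspace, e.g. (the sysfs path is illustrative and depends on the
+ * SPI bus/chip-select):
+ *
+ * echo 1 > /sys/bus/spi/devices/spi0.0/erase
+ */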
+
+static int __devinit eeprom_93xx46_probe(struct spi_device *spi)
+{
+ struct eeprom_93xx46_platform_data *pd;
+ struct eeprom_93xx46_dev *edev;
+ int err;
+
+ pd = spi->dev.platform_data;
+ if (!pd) {
+ dev_err(&spi->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+
+ if (pd->flags & EE_ADDR8)
+ edev->addrlen = 7;
+ else if (pd->flags & EE_ADDR16)
+ edev->addrlen = 6;
+ else {
+ dev_err(&spi->dev, "unspecified address type\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ mutex_init(&edev->lock);
+
+ edev->spi = spi_dev_get(spi);
+ edev->pdata = pd;
+
+ sysfs_bin_attr_init(&edev->bin);
+ edev->bin.attr.name = "eeprom";
+ edev->bin.attr.mode = S_IRUSR;
+ edev->bin.read = eeprom_93xx46_bin_read;
+ edev->bin.size = 128;
+ if (!(pd->flags & EE_READONLY)) {
+ edev->bin.write = eeprom_93xx46_bin_write;
+ edev->bin.attr.mode |= S_IWUSR;
+ }
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin);
+ if (err)
+ goto fail;
+
+ dev_info(&spi->dev, "%d-bit eeprom %s\n",
+ (pd->flags & EE_ADDR8) ? 8 : 16,
+ (pd->flags & EE_READONLY) ? "(readonly)" : "");
+
+ if (!(pd->flags & EE_READONLY)) {
+ if (device_create_file(&spi->dev, &dev_attr_erase))
+ dev_err(&spi->dev, "can't create erase interface\n");
+ }
+
+ dev_set_drvdata(&spi->dev, edev);
+ return 0;
+fail:
+ kfree(edev);
+ return err;
+}
+
+static int __devexit eeprom_93xx46_remove(struct spi_device *spi)
+{
+ struct eeprom_93xx46_dev *edev = dev_get_drvdata(&spi->dev);
+
+ if (!(edev->pdata->flags & EE_READONLY))
+ device_remove_file(&spi->dev, &dev_attr_erase);
+
+ sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(edev);
+ return 0;
+}
+
+static struct spi_driver eeprom_93xx46_driver = {
+ .driver = {
+ .name = "93xx46",
+ .owner = THIS_MODULE,
+ },
+ .probe = eeprom_93xx46_probe,
+ .remove = __devexit_p(eeprom_93xx46_remove),
+};
+
+module_spi_driver(eeprom_93xx46_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_ALIAS("spi:93xx46");
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
new file mode 100644
index 00000000..e36157d5
--- /dev/null
+++ b/drivers/misc/eeprom/max6875.c
@@ -0,0 +1,215 @@
+/*
+ * max6875.c - driver for MAX6874/MAX6875
+ *
+ * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
+ *
+ * Based on eeprom.c
+ *
+ * The MAX6875 has a bank of registers and two banks of EEPROM.
+ * Address ranges are defined as follows:
+ * * 0x0000 - 0x0046 = configuration registers
+ * * 0x8000 - 0x8046 = configuration EEPROM
+ * * 0x8100 - 0x82FF = user EEPROM
+ *
+ * This driver makes the user EEPROM available for read.
+ *
+ * The registers & config EEPROM should be accessed via i2c-dev.
+ *
+ * The MAX6875 ignores the lowest address bit, so each chip responds to
+ * two addresses - 0x50/0x51 and 0x52/0x53.
+ *
+ * Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
+ * address, so this driver is destructive if loaded for the wrong EEPROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+
+/* The MAX6875 can only read/write 16 bytes at a time */
+#define SLICE_SIZE 16
+#define SLICE_BITS 4
+
+/* USER EEPROM is at addresses 0x8100 - 0x82FF */
+#define USER_EEPROM_BASE 0x8100
+#define USER_EEPROM_SIZE 0x0200
+#define USER_EEPROM_SLICES 32
+
+/* MAX6875 commands */
+#define MAX6875_CMD_BLK_READ 0x84
+
+/* Each client has this additional data */
+struct max6875_data {
+ struct i2c_client *fake_client;
+ struct mutex update_lock;
+
+ u32 valid;
+ u8 data[USER_EEPROM_SIZE];
+ unsigned long last_updated[USER_EEPROM_SLICES];
+};
+
+static void max6875_update_slice(struct i2c_client *client, int slice)
+{
+ struct max6875_data *data = i2c_get_clientdata(client);
+ int i, j, addr;
+ u8 *buf;
+
+ if (slice >= USER_EEPROM_SLICES)
+ return;
+
+ mutex_lock(&data->update_lock);
+
+ buf = &data->data[slice << SLICE_BITS];
+
+ if (!(data->valid & (1 << slice)) ||
+ time_after(jiffies, data->last_updated[slice])) {
+
+ dev_dbg(&client->dev, "Starting update of slice %u\n", slice);
+
+ data->valid &= ~(1 << slice);
+
+ addr = USER_EEPROM_BASE + (slice << SLICE_BITS);
+
+ /* select the eeprom address */
+ if (i2c_smbus_write_byte_data(client, addr >> 8, addr & 0xFF)) {
+ dev_err(&client->dev, "address set failed\n");
+ goto exit_up;
+ }
+
+ if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ if (i2c_smbus_read_i2c_block_data(client,
+ MAX6875_CMD_BLK_READ,
+ SLICE_SIZE,
+ buf) != SLICE_SIZE) {
+ goto exit_up;
+ }
+ } else {
+ for (i = 0; i < SLICE_SIZE; i++) {
+ j = i2c_smbus_read_byte(client);
+ if (j < 0) {
+ goto exit_up;
+ }
+ buf[i] = j;
+ }
+ }
+ data->last_updated[slice] = jiffies;
+ data->valid |= (1 << slice);
+ }
+exit_up:
+ mutex_unlock(&data->update_lock);
+}
+
+static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
+ struct max6875_data *data = i2c_get_clientdata(client);
+ int slice, max_slice;
+
+ if (off > USER_EEPROM_SIZE)
+ return 0;
+
+ if (off + count > USER_EEPROM_SIZE)
+ count = USER_EEPROM_SIZE - off;
+
+ /* refresh slices which contain requested bytes */
+ max_slice = (off + count - 1) >> SLICE_BITS;
+ for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
+ max6875_update_slice(client, slice);
+
+ memcpy(buf, &data->data[off], count);
+
+ return count;
+}
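+
+/*
+ * Worked example of the slice arithmetic above (SLICE_BITS == 4, so a
+ * slice is 16 bytes): a 40-byte read at offset 0x25 spans slices
+ * 0x25 >> 4 = 2 through (0x25 + 40 - 1) >> 4 = 4, so slices 2, 3 and 4
+ * are refreshed before the memcpy().
+ */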
+
+static struct bin_attribute user_eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = S_IRUGO,
+ },
+ .size = USER_EEPROM_SIZE,
+ .read = max6875_read,
+};
+
+static int max6875_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct max6875_data *data;
+ int err;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
+ | I2C_FUNC_SMBUS_READ_BYTE))
+ return -ENODEV;
+
+ /* Only bind to even addresses */
+ if (client->addr & 1)
+ return -ENODEV;
+
+ if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
+ return -ENOMEM;
+
+ /* A fake client is created on the odd address */
+ data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1);
+ if (!data->fake_client) {
+ err = -ENOMEM;
+ goto exit_kfree;
+ }
+
+ /* Init real i2c_client */
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ err = sysfs_create_bin_file(&client->dev.kobj, &user_eeprom_attr);
+ if (err)
+ goto exit_remove_fake;
+
+ return 0;
+
+exit_remove_fake:
+ i2c_unregister_device(data->fake_client);
+exit_kfree:
+ kfree(data);
+ return err;
+}
+
+static int max6875_remove(struct i2c_client *client)
+{
+ struct max6875_data *data = i2c_get_clientdata(client);
+
+ i2c_unregister_device(data->fake_client);
+
+ sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr);
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id max6875_id[] = {
+ { "max6875", 0 },
+ { }
+};
+
+static struct i2c_driver max6875_driver = {
+ .driver = {
+ .name = "max6875",
+ },
+ .probe = max6875_probe,
+ .remove = max6875_remove,
+ .id_table = max6875_id,
+};
+
+module_i2c_driver(max6875_driver);
+
+MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
+MODULE_DESCRIPTION("MAX6875 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
new file mode 100644
index 00000000..00e5fcac
--- /dev/null
+++ b/drivers/misc/enclosure.c
@@ -0,0 +1,570 @@
+/*
+ * Enclosure Services
+ *
+ * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ *
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU General Public License
+** version 2 as published by the Free Software Foundation.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+*/
+#include <linux/device.h>
+#include <linux/enclosure.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+static LIST_HEAD(container_list);
+static DEFINE_MUTEX(container_list_lock);
+static struct class enclosure_class;
+
+/**
+ * enclosure_find - find an enclosure given a parent device
+ * @dev: the parent to match against
+ * @start: Optional enclosure device to start from (NULL if none)
+ *
+ * Looks through the list of registered enclosures to find all those
+ * with @dev as a parent. Returns NULL if no enclosure is
+ * found. @start can be used as a starting point to obtain multiple
+ * enclosures per parent (should begin with NULL and then be set to
+ * each returned enclosure device). Obtains a reference to the
+ * enclosure class device which must be released with put_device().
+ * If @start is not NULL, a reference must be taken on it which is
+ * released before returning (this allows a loop through all
+ * enclosures to exit with only the reference on the enclosure of
+ * interest held). Note that the @dev may correspond to the actual
+ * device housing the enclosure, in which case no iteration via @start
+ * is required.
+ */
+struct enclosure_device *enclosure_find(struct device *dev,
+ struct enclosure_device *start)
+{
+ struct enclosure_device *edev;
+
+ mutex_lock(&container_list_lock);
+ edev = list_prepare_entry(start, &container_list, node);
+ if (start)
+ put_device(&start->edev);
+
+ list_for_each_entry_continue(edev, &container_list, node) {
+ struct device *parent = edev->edev.parent;
+ /* parent might not be immediate, so iterate up to
+ * the root of the tree if necessary */
+ while (parent) {
+ if (parent == dev) {
+ get_device(&edev->edev);
+ mutex_unlock(&container_list_lock);
+ return edev;
+ }
+ parent = parent->parent;
+ }
+ }
+ mutex_unlock(&container_list_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(enclosure_find);
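+
+/*
+ * Typical iteration implied by the kernel-doc above (a sketch; 'parent'
+ * and is_the_one_we_want() are hypothetical). enclosure_find() manages
+ * the reference counting itself; only breaking out of the loop leaves a
+ * reference held, which the caller drops when done:
+ *
+ * struct enclosure_device *edev = NULL;
+ *
+ * while ((edev = enclosure_find(parent, edev)) != NULL) {
+ *         if (is_the_one_we_want(edev))
+ *                 break;
+ * }
+ * if (edev)
+ *         put_device(&edev->edev);
+ */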
+
+/**
+ * enclosure_for_each_device - calls a function for each enclosure
+ * @fn: the function to call
+ * @data: the data to pass to each call
+ *
+ * Loops over all the enclosures calling the function.
+ *
+ * Note: this function uses a mutex which is held across the calls to
+ * @fn, so it must be called from non-atomic context, and @fn may
+ * (although it should not) sleep or otherwise cause the mutex to be
+ * held for indefinite periods.
+ */
+int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
+ void *data)
+{
+ int error = 0;
+ struct enclosure_device *edev;
+
+ mutex_lock(&container_list_lock);
+ list_for_each_entry(edev, &container_list, node) {
+ error = fn(edev, data);
+ if (error)
+ break;
+ }
+ mutex_unlock(&container_list_lock);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(enclosure_for_each_device);
+
+/**
+ * enclosure_register - register device as an enclosure
+ *
+ * @dev: device containing the enclosure
+ * @name: name of the enclosure
+ * @components: number of components in the enclosure
+ * @cb: platform callbacks for the enclosure components
+ *
+ * This sets up the device for being an enclosure. Note that @dev does
+ * not have to be a dedicated enclosure device. It may be some other type
+ * of device that additionally responds to enclosure services.
+ */
+struct enclosure_device *
+enclosure_register(struct device *dev, const char *name, int components,
+ struct enclosure_component_callbacks *cb)
+{
+ struct enclosure_device *edev =
+ kzalloc(sizeof(struct enclosure_device) +
+ sizeof(struct enclosure_component)*components,
+ GFP_KERNEL);
+ int err, i;
+
+ BUG_ON(!cb);
+
+ if (!edev)
+ return ERR_PTR(-ENOMEM);
+
+ edev->components = components;
+
+ edev->edev.class = &enclosure_class;
+ edev->edev.parent = get_device(dev);
+ edev->cb = cb;
+ dev_set_name(&edev->edev, "%s", name);
+ err = device_register(&edev->edev);
+ if (err)
+ goto err;
+
+ for (i = 0; i < components; i++)
+ edev->component[i].number = -1;
+
+ mutex_lock(&container_list_lock);
+ list_add_tail(&edev->node, &container_list);
+ mutex_unlock(&container_list_lock);
+
+ return edev;
+
+ err:
+ put_device(edev->edev.parent);
+ kfree(edev);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(enclosure_register);
+
+static struct enclosure_component_callbacks enclosure_null_callbacks;
+
+/**
+ * enclosure_unregister - remove an enclosure
+ *
+ * @edev: the registered enclosure to remove
+ */
+void enclosure_unregister(struct enclosure_device *edev)
+{
+ int i;
+
+ mutex_lock(&container_list_lock);
+ list_del(&edev->node);
+ mutex_unlock(&container_list_lock);
+
+ for (i = 0; i < edev->components; i++)
+ if (edev->component[i].number != -1)
+ device_unregister(&edev->component[i].cdev);
+
+ /* prevent any callbacks into service user */
+ edev->cb = &enclosure_null_callbacks;
+ device_unregister(&edev->edev);
+}
+EXPORT_SYMBOL_GPL(enclosure_unregister);
+
+#define ENCLOSURE_NAME_SIZE 64
+
+static void enclosure_link_name(struct enclosure_component *cdev, char *name)
+{
+ strcpy(name, "enclosure_device:");
+ strcat(name, dev_name(&cdev->cdev));
+}
+
+static void enclosure_remove_links(struct enclosure_component *cdev)
+{
+ char name[ENCLOSURE_NAME_SIZE];
+
+ enclosure_link_name(cdev, name);
+ sysfs_remove_link(&cdev->dev->kobj, name);
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+}
+
+static int enclosure_add_links(struct enclosure_component *cdev)
+{
+ int error;
+ char name[ENCLOSURE_NAME_SIZE];
+
+ error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
+ if (error)
+ return error;
+
+ enclosure_link_name(cdev, name);
+ error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
+ if (error)
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+
+ return error;
+}
+
+static void enclosure_release(struct device *cdev)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev);
+
+ put_device(cdev->parent);
+ kfree(edev);
+}
+
+static void enclosure_component_release(struct device *dev)
+{
+ struct enclosure_component *cdev = to_enclosure_component(dev);
+
+ if (cdev->dev) {
+ enclosure_remove_links(cdev);
+ put_device(cdev->dev);
+ }
+ put_device(dev->parent);
+}
+
+static const struct attribute_group *enclosure_groups[];
+
+/**
+ * enclosure_component_register - add a particular component to an enclosure
+ * @edev: the enclosure to add the component to
+ * @number: the component number
+ * @type: the type of component being added
+ * @name: an optional name to appear in sysfs (leave NULL if none)
+ *
+ * Registers the component. The name is optional for enclosures that
+ * give their components a unique name. If not, leave the field NULL
+ * and a name will be assigned.
+ *
+ * Returns a pointer to the enclosure component or an error.
+ */
+struct enclosure_component *
+enclosure_component_register(struct enclosure_device *edev,
+ unsigned int number,
+ enum enclosure_component_type type,
+ const char *name)
+{
+ struct enclosure_component *ecomp;
+ struct device *cdev;
+ int err;
+
+ if (number >= edev->components)
+ return ERR_PTR(-EINVAL);
+
+ ecomp = &edev->component[number];
+
+ if (ecomp->number != -1)
+ return ERR_PTR(-EINVAL);
+
+ ecomp->type = type;
+ ecomp->number = number;
+ cdev = &ecomp->cdev;
+ cdev->parent = get_device(&edev->edev);
+ if (name && name[0])
+ dev_set_name(cdev, "%s", name);
+ else
+ dev_set_name(cdev, "%u", number);
+
+ cdev->release = enclosure_component_release;
+ cdev->groups = enclosure_groups;
+
+ err = device_register(cdev);
+ if (err) {
+ ecomp->number = -1;
+ put_device(cdev);
+ return ERR_PTR(err);
+ }
+
+ return ecomp;
+}
+EXPORT_SYMBOL_GPL(enclosure_component_register);
+
+/**
+ * enclosure_add_device - add a device as being part of an enclosure
+ * @edev: the enclosure device being added to.
+ * @component: the number of the component
+ * @dev: the device being added
+ *
+ * Declares a real device to reside in slot (or identifier) @num of an
+ * enclosure. This will cause the relevant sysfs links to appear.
+ * This function may also be used to change a device associated with
+ * an enclosure without having to call enclosure_remove_device() in
+ * between.
+ *
+ * Returns zero on success or an error.
+ */
+int enclosure_add_device(struct enclosure_device *edev, int component,
+ struct device *dev)
+{
+ struct enclosure_component *cdev;
+
+ if (!edev || component >= edev->components)
+ return -EINVAL;
+
+ cdev = &edev->component[component];
+
+ if (cdev->dev == dev)
+ return -EEXIST;
+
+ if (cdev->dev)
+ enclosure_remove_links(cdev);
+
+ put_device(cdev->dev);
+ cdev->dev = get_device(dev);
+ return enclosure_add_links(cdev);
+}
+EXPORT_SYMBOL_GPL(enclosure_add_device);
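+
+/*
+ * End-to-end registration sketch (all names hypothetical): an enclosure
+ * driver typically registers the enclosure, then each component, then
+ * attaches the real device occupying each slot:
+ *
+ * struct enclosure_device *edev;
+ *
+ * edev = enclosure_register(parent, "encl0", 8, &my_enclosure_cb);
+ * if (IS_ERR(edev))
+ *         return PTR_ERR(edev);
+ * enclosure_component_register(edev, 0, ENCLOSURE_COMPONENT_DEVICE, NULL);
+ * enclosure_add_device(edev, 0, &slot0_dev);
+ */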
+
+/**
+ * enclosure_remove_device - remove a device from an enclosure
+ * @edev: the enclosure device
+ * @dev: the device to remove
+ *
+ * Returns zero on success or an error.
+ */
+int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
+{
+ struct enclosure_component *cdev;
+ int i;
+
+ if (!edev || !dev)
+ return -EINVAL;
+
+ for (i = 0; i < edev->components; i++) {
+ cdev = &edev->component[i];
+ if (cdev->dev == dev) {
+ enclosure_remove_links(cdev);
+ device_del(&cdev->cdev);
+ put_device(dev);
+ cdev->dev = NULL;
+ return device_add(&cdev->cdev);
+ }
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(enclosure_remove_device);
+
+/*
+ * sysfs pieces below
+ */
+
+static ssize_t enclosure_show_components(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev);
+
+ return snprintf(buf, 40, "%d\n", edev->components);
+}
+
+static struct device_attribute enclosure_attrs[] = {
+ __ATTR(components, S_IRUGO, enclosure_show_components, NULL),
+ __ATTR_NULL
+};
+
+static struct class enclosure_class = {
+ .name = "enclosure",
+ .owner = THIS_MODULE,
+ .dev_release = enclosure_release,
+ .dev_attrs = enclosure_attrs,
+};
+
+static const char *const enclosure_status [] = {
+ [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
+ [ENCLOSURE_STATUS_OK] = "OK",
+ [ENCLOSURE_STATUS_CRITICAL] = "critical",
+ [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
+ [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
+ [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
+ [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
+ [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
+ [ENCLOSURE_STATUS_MAX] = NULL,
+};
+
+static const char *const enclosure_type [] = {
+ [ENCLOSURE_COMPONENT_DEVICE] = "device",
+ [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
+};
+
+static ssize_t get_component_fault(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_fault)
+ edev->cb->get_fault(edev, ecomp);
+ return snprintf(buf, 40, "%d\n", ecomp->fault);
+}
+
+static ssize_t set_component_fault(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (edev->cb->set_fault)
+ edev->cb->set_fault(edev, ecomp, val);
+ return count;
+}
+
+static ssize_t get_component_status(struct device *cdev,
+ struct device_attribute *attr,char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_status)
+ edev->cb->get_status(edev, ecomp);
+ return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
+}
+
+static ssize_t set_component_status(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int i;
+
+ for (i = 0; enclosure_status[i]; i++) {
+ if (strncmp(buf, enclosure_status[i],
+ strlen(enclosure_status[i])) == 0 &&
+ (buf[strlen(enclosure_status[i])] == '\n' ||
+ buf[strlen(enclosure_status[i])] == '\0'))
+ break;
+ }
+
+ if (enclosure_status[i] && edev->cb->set_status) {
+ edev->cb->set_status(edev, ecomp, i);
+ return count;
+ } else
+ return -EINVAL;
+}
+
+static ssize_t get_component_active(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_active)
+ edev->cb->get_active(edev, ecomp);
+ return snprintf(buf, 40, "%d\n", ecomp->active);
+}
+
+static ssize_t set_component_active(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (edev->cb->set_active)
+ edev->cb->set_active(edev, ecomp, val);
+ return count;
+}
+
+static ssize_t get_component_locate(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_locate)
+ edev->cb->get_locate(edev, ecomp);
+ return snprintf(buf, 40, "%d\n", ecomp->locate);
+}
+
+static ssize_t set_component_locate(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (edev->cb->set_locate)
+ edev->cb->set_locate(edev, ecomp, val);
+ return count;
+}
+
+static ssize_t get_component_type(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
+}
+
+static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
+ set_component_fault);
+static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
+ set_component_status);
+static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
+ set_component_active);
+static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
+ set_component_locate);
+static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+
+static struct attribute *enclosure_component_attrs[] = {
+ &dev_attr_fault.attr,
+ &dev_attr_status.attr,
+ &dev_attr_active.attr,
+ &dev_attr_locate.attr,
+ &dev_attr_type.attr,
+ NULL
+};
+
+static struct attribute_group enclosure_group = {
+ .attrs = enclosure_component_attrs,
+};
+
+static const struct attribute_group *enclosure_groups[] = {
+ &enclosure_group,
+ NULL
+};
+
+static int __init enclosure_init(void)
+{
+ int err;
+
+ err = class_register(&enclosure_class);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void __exit enclosure_exit(void)
+{
+ class_unregister(&enclosure_class);
+}
+
+module_init(enclosure_init);
+module_exit(enclosure_exit);
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("Enclosure Services");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
new file mode 100644
index 00000000..16d7179e
--- /dev/null
+++ b/drivers/misc/ep93xx_pwm.c
@@ -0,0 +1,385 @@
+/*
+ * Simple PWM driver for EP93XX
+ *
+ * (c) Copyright 2009 Matthieu Crapet <mcrapet@gmail.com>
+ * (c) Copyright 2009 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * EP9307 has only one channel:
+ * - PWMOUT
+ *
+ * EP9301/02/12/15 have two channels:
+ * - PWMOUT
+ * - PWMOUT1 (alternate function for EGPIO14)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <mach/platform.h>
+
+#define EP93XX_PWMx_TERM_COUNT 0x00
+#define EP93XX_PWMx_DUTY_CYCLE 0x04
+#define EP93XX_PWMx_ENABLE 0x08
+#define EP93XX_PWMx_INVERT 0x0C
+
+#define EP93XX_PWM_MAX_COUNT 0xFFFF
+
+struct ep93xx_pwm {
+ void __iomem *mmio_base;
+ struct clk *clk;
+ u32 duty_percent;
+};
+
+static inline void ep93xx_pwm_writel(struct ep93xx_pwm *pwm,
+ unsigned int val, unsigned int off)
+{
+ __raw_writel(val, pwm->mmio_base + off);
+}
+
+static inline unsigned int ep93xx_pwm_readl(struct ep93xx_pwm *pwm,
+ unsigned int off)
+{
+ return __raw_readl(pwm->mmio_base + off);
+}
+
+static inline void ep93xx_pwm_write_tc(struct ep93xx_pwm *pwm, u16 value)
+{
+ ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_TERM_COUNT);
+}
+
+static inline u16 ep93xx_pwm_read_tc(struct ep93xx_pwm *pwm)
+{
+ return ep93xx_pwm_readl(pwm, EP93XX_PWMx_TERM_COUNT);
+}
+
+static inline void ep93xx_pwm_write_dc(struct ep93xx_pwm *pwm, u16 value)
+{
+ ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_DUTY_CYCLE);
+}
+
+static inline void ep93xx_pwm_enable(struct ep93xx_pwm *pwm)
+{
+ ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_ENABLE);
+}
+
+static inline void ep93xx_pwm_disable(struct ep93xx_pwm *pwm)
+{
+ ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_ENABLE);
+}
+
+static inline int ep93xx_pwm_is_enabled(struct ep93xx_pwm *pwm)
+{
+ return ep93xx_pwm_readl(pwm, EP93XX_PWMx_ENABLE) & 0x1;
+}
+
+static inline void ep93xx_pwm_invert(struct ep93xx_pwm *pwm)
+{
+ ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_INVERT);
+}
+
+static inline void ep93xx_pwm_normal(struct ep93xx_pwm *pwm)
+{
+ ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_INVERT);
+}
+
+static inline int ep93xx_pwm_is_inverted(struct ep93xx_pwm *pwm)
+{
+ return ep93xx_pwm_readl(pwm, EP93XX_PWMx_INVERT) & 0x1;
+}
+
+/*
+ * /sys/devices/platform/ep93xx-pwm.N
+ * /min_freq read-only minimum pwm output frequency
+ * /max_freq read-only maximum pwm output frequency
+ * /freq read-write pwm output frequency (0 = disable output)
+ * /duty_percent read-write pwm duty cycle percent (1..99)
+ * /invert read-write invert pwm output
+ */
+
+static ssize_t ep93xx_pwm_get_min_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+ unsigned long rate = clk_get_rate(pwm->clk);
+
+ return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1));
+}
+
+static ssize_t ep93xx_pwm_get_max_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+ unsigned long rate = clk_get_rate(pwm->clk);
+
+ return sprintf(buf, "%ld\n", rate / 2);
+}
+
+static ssize_t ep93xx_pwm_get_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+
+ if (ep93xx_pwm_is_enabled(pwm)) {
+ unsigned long rate = clk_get_rate(pwm->clk);
+ u16 term = ep93xx_pwm_read_tc(pwm);
+
+ return sprintf(buf, "%ld\n", rate / (term + 1));
+ } else {
+ return sprintf(buf, "disabled\n");
+ }
+}
+
+static ssize_t ep93xx_pwm_set_freq(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return -EINVAL;
+
+ if (val == 0) {
+ ep93xx_pwm_disable(pwm);
+ } else if (val <= (clk_get_rate(pwm->clk) / 2)) {
+ u32 term, duty;
+
+ val = (clk_get_rate(pwm->clk) / val) - 1;
+ if (val > EP93XX_PWM_MAX_COUNT)
+ val = EP93XX_PWM_MAX_COUNT;
+ if (val < 1)
+ val = 1;
+
+ term = ep93xx_pwm_read_tc(pwm);
+ duty = ((val + 1) * pwm->duty_percent / 100) - 1;
+
+ /* If pwm is running, order is important */
+ if (val > term) {
+ ep93xx_pwm_write_tc(pwm, val);
+ ep93xx_pwm_write_dc(pwm, duty);
+ } else {
+ ep93xx_pwm_write_dc(pwm, duty);
+ ep93xx_pwm_write_tc(pwm, val);
+ }
+
+ if (!ep93xx_pwm_is_enabled(pwm))
+ ep93xx_pwm_enable(pwm);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
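+
+/*
+ * Worked example of the conversion above (numbers illustrative, assuming
+ * a hypothetical 1 MHz pwm_clk): writing 1000 to 'freq' yields a
+ * terminal count of 1000000 / 1000 - 1 = 999; with the default 50% duty
+ * cycle the duty-cycle register becomes (999 + 1) * 50 / 100 - 1 = 499.
+ */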
+
+static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", pwm->duty_percent);
+}
+
+static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return -EINVAL;
+
+ if (val > 0 && val < 100) {
+ u32 term = ep93xx_pwm_read_tc(pwm);
+ ep93xx_pwm_write_dc(pwm, ((term + 1) * val / 100) - 1);
+ pwm->duty_percent = val;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t ep93xx_pwm_get_invert(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ep93xx_pwm_is_inverted(pwm));
+}
+
+static ssize_t ep93xx_pwm_set_invert(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return -EINVAL;
+
+ if (val == 0)
+ ep93xx_pwm_normal(pwm);
+ else if (val == 1)
+ ep93xx_pwm_invert(pwm);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
+static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
+static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
+ ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
+static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
+ ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
+static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
+ ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
+
+static struct attribute *ep93xx_pwm_attrs[] = {
+ &dev_attr_min_freq.attr,
+ &dev_attr_max_freq.attr,
+ &dev_attr_freq.attr,
+ &dev_attr_duty_percent.attr,
+ &dev_attr_invert.attr,
+ NULL
+};
+
+static const struct attribute_group ep93xx_pwm_sysfs_files = {
+ .attrs = ep93xx_pwm_attrs,
+};
+
+static int __init ep93xx_pwm_probe(struct platform_device *pdev)
+{
+ struct ep93xx_pwm *pwm;
+ struct resource *res;
+ int err;
+
+ err = ep93xx_pwm_acquire_gpio(pdev);
+ if (err)
+ return err;
+
+ pwm = kzalloc(sizeof(struct ep93xx_pwm), GFP_KERNEL);
+ if (!pwm) {
+ err = -ENOMEM;
+ goto fail_no_mem;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -ENXIO;
+ goto fail_no_mem_resource;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ err = -EBUSY;
+ goto fail_no_mem_resource;
+ }
+
+ pwm->mmio_base = ioremap(res->start, resource_size(res));
+ if (pwm->mmio_base == NULL) {
+ err = -ENXIO;
+ goto fail_no_ioremap;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
+ if (err)
+ goto fail_no_sysfs;
+
+ pwm->clk = clk_get(&pdev->dev, "pwm_clk");
+ if (IS_ERR(pwm->clk)) {
+ err = PTR_ERR(pwm->clk);
+ goto fail_no_clk;
+ }
+
+ pwm->duty_percent = 50;
+
+ platform_set_drvdata(pdev, pwm);
+
+ /* disable pwm at startup. Avoids zero value. */
+ ep93xx_pwm_disable(pwm);
+ ep93xx_pwm_write_tc(pwm, EP93XX_PWM_MAX_COUNT);
+ ep93xx_pwm_write_dc(pwm, EP93XX_PWM_MAX_COUNT / 2);
+
+ clk_enable(pwm->clk);
+
+ return 0;
+
+fail_no_clk:
+ sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
+fail_no_sysfs:
+ iounmap(pwm->mmio_base);
+fail_no_ioremap:
+ release_mem_region(res->start, resource_size(res));
+fail_no_mem_resource:
+ kfree(pwm);
+fail_no_mem:
+ ep93xx_pwm_release_gpio(pdev);
+ return err;
+}
+
+static int __exit ep93xx_pwm_remove(struct platform_device *pdev)
+{
+ struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ep93xx_pwm_disable(pwm);
+ clk_disable(pwm->clk);
+ clk_put(pwm->clk);
+ platform_set_drvdata(pdev, NULL);
+ sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
+ iounmap(pwm->mmio_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(pwm);
+ ep93xx_pwm_release_gpio(pdev);
+
+ return 0;
+}
+
+static struct platform_driver ep93xx_pwm_driver = {
+ .driver = {
+ .name = "ep93xx-pwm",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(ep93xx_pwm_remove),
+};
+
+static int __init ep93xx_pwm_init(void)
+{
+ return platform_driver_probe(&ep93xx_pwm_driver, ep93xx_pwm_probe);
+}
+
+static void __exit ep93xx_pwm_exit(void)
+{
+ platform_driver_unregister(&ep93xx_pwm_driver);
+}
+
+module_init(ep93xx_pwm_init);
+module_exit(ep93xx_pwm_exit);
+
+MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
+ "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("EP93xx PWM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-pwm");
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
new file mode 100644
index 00000000..ac96c3a4
--- /dev/null
+++ b/drivers/misc/fsa9480.c
@@ -0,0 +1,546 @@
+/*
+ * fsa9480.c - FSA9480 micro USB switch device driver
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ * Wonguk Jeong <wonguk.jeong@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/fsa9480.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+/* FSA9480 I2C registers */
+#define FSA9480_REG_DEVID 0x01
+#define FSA9480_REG_CTRL 0x02
+#define FSA9480_REG_INT1 0x03
+#define FSA9480_REG_INT2 0x04
+#define FSA9480_REG_INT1_MASK 0x05
+#define FSA9480_REG_INT2_MASK 0x06
+#define FSA9480_REG_ADC 0x07
+#define FSA9480_REG_TIMING1 0x08
+#define FSA9480_REG_TIMING2 0x09
+#define FSA9480_REG_DEV_T1 0x0a
+#define FSA9480_REG_DEV_T2 0x0b
+#define FSA9480_REG_BTN1 0x0c
+#define FSA9480_REG_BTN2 0x0d
+#define FSA9480_REG_CK 0x0e
+#define FSA9480_REG_CK_INT1 0x0f
+#define FSA9480_REG_CK_INT2 0x10
+#define FSA9480_REG_CK_INTMASK1 0x11
+#define FSA9480_REG_CK_INTMASK2 0x12
+#define FSA9480_REG_MANSW1 0x13
+#define FSA9480_REG_MANSW2 0x14
+
+/* Control */
+#define CON_SWITCH_OPEN (1 << 4)
+#define CON_RAW_DATA (1 << 3)
+#define CON_MANUAL_SW (1 << 2)
+#define CON_WAIT (1 << 1)
+#define CON_INT_MASK (1 << 0)
+#define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \
+ CON_MANUAL_SW | CON_WAIT)
+
+/* Device Type 1 */
+#define DEV_USB_OTG (1 << 7)
+#define DEV_DEDICATED_CHG (1 << 6)
+#define DEV_USB_CHG (1 << 5)
+#define DEV_CAR_KIT (1 << 4)
+#define DEV_UART (1 << 3)
+#define DEV_USB (1 << 2)
+#define DEV_AUDIO_2 (1 << 1)
+#define DEV_AUDIO_1 (1 << 0)
+
+#define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB)
+#define DEV_T1_UART_MASK (DEV_UART)
+#define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG)
+
+/* Device Type 2 */
+#define DEV_AV (1 << 6)
+#define DEV_TTY (1 << 5)
+#define DEV_PPD (1 << 4)
+#define DEV_JIG_UART_OFF (1 << 3)
+#define DEV_JIG_UART_ON (1 << 2)
+#define DEV_JIG_USB_OFF (1 << 1)
+#define DEV_JIG_USB_ON (1 << 0)
+
+#define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON)
+#define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
+#define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \
+ DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
+
+/*
+ * Manual Switch
+ * D- [7:5] / D+ [4:2]
+ * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO
+ */
+#define SW_VAUDIO ((4 << 5) | (4 << 2))
+#define SW_UART ((3 << 5) | (3 << 2))
+#define SW_AUDIO ((2 << 5) | (2 << 2))
+#define SW_DHOST ((1 << 5) | (1 << 2))
+#define SW_AUTO ((0 << 5) | (0 << 2))
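+
+/*
+ * Worked example of the encoding above: SW_UART routes both D- and D+
+ * to UART, i.e. (3 << 5) | (3 << 2) = 0x60 | 0x0c = 0x6c in MANSW1.
+ */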
+
+/* Interrupt 1 */
+#define INT_DETACH (1 << 1)
+#define INT_ATTACH (1 << 0)
+
+struct fsa9480_usbsw {
+ struct i2c_client *client;
+ struct fsa9480_platform_data *pdata;
+ int dev1;
+ int dev2;
+ int mansw;
+};
+
+static struct fsa9480_usbsw *chip;
+
+static int fsa9480_write_reg(struct i2c_client *client,
+ int reg, int value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int fsa9480_read_reg(struct i2c_client *client, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int fsa9480_read_irq(struct i2c_client *client, int *value)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client,
+ FSA9480_REG_INT1, 2, (u8 *)value);
+ *value &= 0xffff;
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void fsa9480_set_switch(const char *buf)
+{
+ struct fsa9480_usbsw *usbsw = chip;
+ struct i2c_client *client = usbsw->client;
+ unsigned int value;
+ unsigned int path = 0;
+
+ value = fsa9480_read_reg(client, FSA9480_REG_CTRL);
+
+ if (!strncmp(buf, "VAUDIO", 6)) {
+ path = SW_VAUDIO;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "UART", 4)) {
+ path = SW_UART;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "AUDIO", 5)) {
+ path = SW_AUDIO;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "DHOST", 5)) {
+ path = SW_DHOST;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "AUTO", 4)) {
+ path = SW_AUTO;
+ value |= CON_MANUAL_SW;
+ } else {
+ printk(KERN_ERR "Wrong command\n");
+ return;
+ }
+
+ usbsw->mansw = path;
+ fsa9480_write_reg(client, FSA9480_REG_MANSW1, path);
+ fsa9480_write_reg(client, FSA9480_REG_CTRL, value);
+}
+
+static ssize_t fsa9480_get_switch(char *buf)
+{
+ struct fsa9480_usbsw *usbsw = chip;
+ struct i2c_client *client = usbsw->client;
+ unsigned int value;
+
+ value = fsa9480_read_reg(client, FSA9480_REG_MANSW1);
+
+ if (value == SW_VAUDIO)
+ return sprintf(buf, "VAUDIO\n");
+ else if (value == SW_UART)
+ return sprintf(buf, "UART\n");
+ else if (value == SW_AUDIO)
+ return sprintf(buf, "AUDIO\n");
+ else if (value == SW_DHOST)
+ return sprintf(buf, "DHOST\n");
+ else if (value == SW_AUTO)
+ return sprintf(buf, "AUTO\n");
+ else
+ return sprintf(buf, "%x", value);
+}
+
+static ssize_t fsa9480_show_device(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev);
+ struct i2c_client *client = usbsw->client;
+ int dev1, dev2;
+
+ dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
+ dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
+
+ if (!dev1 && !dev2)
+ return sprintf(buf, "NONE\n");
+
+ /* USB */
+ if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK)
+ return sprintf(buf, "USB\n");
+
+ /* UART */
+ if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK)
+ return sprintf(buf, "UART\n");
+
+ /* CHARGER */
+ if (dev1 & DEV_T1_CHARGER_MASK)
+ return sprintf(buf, "CHARGER\n");
+
+ /* JIG */
+ if (dev2 & DEV_T2_JIG_MASK)
+ return sprintf(buf, "JIG\n");
+
+ return sprintf(buf, "UNKNOWN\n");
+}
+
+static ssize_t fsa9480_show_manualsw(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return fsa9480_get_switch(buf);
+}
+
+static ssize_t fsa9480_set_manualsw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ fsa9480_set_switch(buf);
+
+ return count;
+}
+
+static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL);
+static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR,
+ fsa9480_show_manualsw, fsa9480_set_manualsw);
+
+static struct attribute *fsa9480_attributes[] = {
+ &dev_attr_device.attr,
+ &dev_attr_switch.attr,
+ NULL
+};
+
+static const struct attribute_group fsa9480_group = {
+ .attrs = fsa9480_attributes,
+};
+
+static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr)
+{
+ int val1, val2, ctrl;
+ struct fsa9480_platform_data *pdata = usbsw->pdata;
+ struct i2c_client *client = usbsw->client;
+
+ val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
+ val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
+ ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL);
+
+ dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n",
+ intr, val1, val2);
+
+ if (!intr)
+ goto out;
+
+ if (intr & INT_ATTACH) { /* Attached */
+ /* USB */
+ if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) {
+ if (pdata->usb_cb)
+ pdata->usb_cb(FSA9480_ATTACHED);
+
+ if (usbsw->mansw) {
+ fsa9480_write_reg(client,
+ FSA9480_REG_MANSW1, usbsw->mansw);
+ }
+ }
+
+ /* UART */
+ if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) {
+ if (pdata->uart_cb)
+ pdata->uart_cb(FSA9480_ATTACHED);
+
+ if (!(ctrl & CON_MANUAL_SW)) {
+ fsa9480_write_reg(client,
+ FSA9480_REG_MANSW1, SW_UART);
+ }
+ }
+
+ /* CHARGER */
+ if (val1 & DEV_T1_CHARGER_MASK) {
+ if (pdata->charger_cb)
+ pdata->charger_cb(FSA9480_ATTACHED);
+ }
+
+ /* JIG */
+ if (val2 & DEV_T2_JIG_MASK) {
+ if (pdata->jig_cb)
+ pdata->jig_cb(FSA9480_ATTACHED);
+ }
+ } else if (intr & INT_DETACH) { /* Detached */
+ /* USB */
+ if (usbsw->dev1 & DEV_T1_USB_MASK ||
+ usbsw->dev2 & DEV_T2_USB_MASK) {
+ if (pdata->usb_cb)
+ pdata->usb_cb(FSA9480_DETACHED);
+ }
+
+ /* UART */
+ if (usbsw->dev1 & DEV_T1_UART_MASK ||
+ usbsw->dev2 & DEV_T2_UART_MASK) {
+ if (pdata->uart_cb)
+ pdata->uart_cb(FSA9480_DETACHED);
+ }
+
+ /* CHARGER */
+ if (usbsw->dev1 & DEV_T1_CHARGER_MASK) {
+ if (pdata->charger_cb)
+ pdata->charger_cb(FSA9480_DETACHED);
+ }
+
+ /* JIG */
+ if (usbsw->dev2 & DEV_T2_JIG_MASK) {
+ if (pdata->jig_cb)
+ pdata->jig_cb(FSA9480_DETACHED);
+ }
+ }
+
+ usbsw->dev1 = val1;
+ usbsw->dev2 = val2;
+
+out:
+ ctrl &= ~CON_INT_MASK;
+ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl);
+}
+
+static irqreturn_t fsa9480_irq_handler(int irq, void *data)
+{
+ struct fsa9480_usbsw *usbsw = data;
+ struct i2c_client *client = usbsw->client;
+ int intr;
+
+ /* clear interrupt */
+ fsa9480_read_irq(client, &intr);
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw, intr);
+
+ return IRQ_HANDLED;
+}
+
+static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)
+{
+ struct fsa9480_platform_data *pdata = usbsw->pdata;
+ struct i2c_client *client = usbsw->client;
+ int ret;
+ int intr;
+ unsigned int ctrl = CON_MASK;
+
+ /* clear interrupt */
+ fsa9480_read_irq(client, &intr);
+
+ /* unmask interrupt (attach/detach only) */
+ fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc);
+ fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f);
+
+ usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1);
+
+ if (usbsw->mansw)
+ ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */
+
+ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl);
+
+ if (pdata && pdata->cfg_gpio)
+ pdata->cfg_gpio();
+
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq, NULL,
+ fsa9480_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "fsa9480 micro USB", usbsw);
+ if (ret) {
+ dev_err(&client->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ if (pdata)
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ }
+
+ return 0;
+}
+
+static int __devinit fsa9480_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct fsa9480_usbsw *usbsw;
+ int ret = 0;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL);
+ if (!usbsw) {
+ dev_err(&client->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ usbsw->client = client;
+ usbsw->pdata = client->dev.platform_data;
+
+ chip = usbsw;
+
+ i2c_set_clientdata(client, usbsw);
+
+ ret = fsa9480_irq_init(usbsw);
+ if (ret)
+ goto fail1;
+
+ ret = sysfs_create_group(&client->dev.kobj, &fsa9480_group);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to create fsa9480 attribute group\n");
+ goto fail2;
+ }
+
+ /* ADC Detect Time: 500ms */
+ fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6);
+
+ if (chip->pdata->reset_cb)
+ chip->pdata->reset_cb();
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw, INT_ATTACH);
+
+ pm_runtime_set_active(&client->dev);
+
+ return 0;
+
+fail2:
+ if (client->irq)
+ free_irq(client->irq, usbsw);
+fail1:
+ kfree(usbsw);
+ return ret;
+}
+
+static int __devexit fsa9480_remove(struct i2c_client *client)
+{
+ struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+ if (client->irq)
+ free_irq(client->irq, usbsw);
+
+ sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
+ device_init_wakeup(&client->dev, 0);
+ kfree(usbsw);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int fsa9480_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+ struct fsa9480_platform_data *pdata = usbsw->pdata;
+
+ if (device_may_wakeup(&client->dev) && client->irq)
+ enable_irq_wake(client->irq);
+
+ if (pdata->usb_power)
+ pdata->usb_power(0);
+
+ return 0;
+}
+
+static int fsa9480_resume(struct i2c_client *client)
+{
+ struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+ int dev1, dev2;
+
+ if (device_may_wakeup(&client->dev) && client->irq)
+ disable_irq_wake(client->irq);
+
+ /*
+ * Clear any pending interrupt. Note that fsa9480_detect_dev() does
+ * what the interrupt handler does, so a pending event is not missed
+ * and interrupts are re-enabled afterwards.
+ */
+ fsa9480_read_reg(client, FSA9480_REG_INT1);
+ fsa9480_read_reg(client, FSA9480_REG_INT2);
+
+ dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
+ dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw, (dev1 || dev2) ? INT_ATTACH : INT_DETACH);
+
+ return 0;
+}
+
+#else
+
+#define fsa9480_suspend NULL
+#define fsa9480_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id fsa9480_id[] = {
+ {"fsa9480", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, fsa9480_id);
+
+static struct i2c_driver fsa9480_i2c_driver = {
+ .driver = {
+ .name = "fsa9480",
+ },
+ .probe = fsa9480_probe,
+ .remove = __devexit_p(fsa9480_remove),
+ .resume = fsa9480_resume,
+ .suspend = fsa9480_suspend,
+ .id_table = fsa9480_id,
+};
+
+module_i2c_driver(fsa9480_i2c_driver);
+
+MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
+MODULE_DESCRIPTION("FSA9480 USB Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
new file mode 100644
index 00000000..423cd40f
--- /dev/null
+++ b/drivers/misc/hmc6352.c
@@ -0,0 +1,155 @@
+/*
+ * hmc6352.c - Honeywell Compass Driver
+ *
+ * Copyright (C) 2009 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+
+static DEFINE_MUTEX(compass_mutex);
+
+static int compass_command(struct i2c_client *c, u8 cmd)
+{
+ int ret = i2c_master_send(c, &cmd, 1);
+ if (ret < 0)
+ dev_warn(&c->dev, "command '%c' failed.\n", cmd);
+ return ret;
+}
+
+static int compass_store(struct device *dev, const char *buf, size_t count,
+ const char *map)
+{
+ struct i2c_client *c = to_i2c_client(dev);
+ int ret;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val >= strlen(map))
+ return -EINVAL;
+ mutex_lock(&compass_mutex);
+ ret = compass_command(c, map[val]);
+ mutex_unlock(&compass_mutex);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t compass_calibration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return compass_store(dev, buf, count, "EC");
+}
+
+static ssize_t compass_power_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return compass_store(dev, buf, count, "SW");
+}
+
+static ssize_t compass_heading_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char i2c_data[2];
+ int ret;
+
+ mutex_lock(&compass_mutex);
+ ret = compass_command(client, 'A');
+ if (ret != 1) {
+ mutex_unlock(&compass_mutex);
+ return ret;
+ }
+ msleep(10); /* after sending the 'A' command we must wait 7-10 ms */
+ ret = i2c_master_recv(client, i2c_data, 2);
+ mutex_unlock(&compass_mutex);
+ if (ret < 0) {
+ dev_warn(dev, "i2c read data cmd failed\n");
+ return ret;
+ }
+ ret = (i2c_data[0] << 8) | i2c_data[1];
+ return sprintf(buf, "%d.%d\n", ret/10, ret%10);
+}
+
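+/*
+ * Worked example (for illustration): if the chip returns the bytes
+ * 0x05 0x32, the heading is (0x05 << 8) | 0x32 = 1330 tenths of a
+ * degree, which compass_heading_data_show() prints as "133.0".
+ */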
+
+static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL);
+static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
+static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
+
+static struct attribute *mid_att_compass[] = {
+ &dev_attr_heading0_input.attr,
+ &dev_attr_calibration.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static const struct attribute_group m_compass_gr = {
+ .name = "hmc6352",
+ .attrs = mid_att_compass
+};
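+
+/*
+ * Illustrative usage of the sysfs interface above (the parent path
+ * depends on the i2c bus and address the device is bound to):
+ *
+ *   cat .../hmc6352/heading0_input    - prints e.g. "133.0"
+ *   echo 0 > .../hmc6352/calibration  - sends 'E'; echo 1 sends 'C'
+ *   echo 0 > .../hmc6352/power_state  - sends 'S'; echo 1 sends 'W'
+ *
+ * The written index simply selects one character from the map strings
+ * "EC" and "SW" handed to compass_store().
+ */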
+
+static int hmc6352_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+
+ res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
+ if (res) {
+ dev_err(&client->dev, "device_create_file failed\n");
+ return res;
+ }
+ dev_info(&client->dev, "%s HMC6352 compass chip found\n",
+ client->name);
+ return 0;
+}
+
+static int hmc6352_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
+ return 0;
+}
+
+static struct i2c_device_id hmc6352_id[] = {
+ { "hmc6352", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, hmc6352_id);
+
+static struct i2c_driver hmc6352_driver = {
+ .driver = {
+ .name = "hmc6352",
+ },
+ .probe = hmc6352_probe,
+ .remove = hmc6352_remove,
+ .id_table = hmc6352_id,
+};
+
+module_i2c_driver(hmc6352_driver);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("hmc6352 Compass Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
new file mode 100644
index 00000000..fffc2271
--- /dev/null
+++ b/drivers/misc/hpilo.c
@@ -0,0 +1,889 @@
+/*
+ * Driver for the HP iLO management processor.
+ *
+ * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
+ * David Altobelli <david.altobelli@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include "hpilo.h"
+
+static struct class *ilo_class;
+static unsigned int ilo_major;
+static char ilo_hwdev[MAX_ILO_DEV];
+
+static inline int get_entry_id(int entry)
+{
+ return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
+}
+
+static inline int get_entry_len(int entry)
+{
+ return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
+}
+
+static inline int mk_entry(int id, int len)
+{
+ int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
+ return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
+}
+
+static inline int desc_mem_sz(int nr_entry)
+{
+ return nr_entry << L2_QENTRY_SZ;
+}
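+
+/*
+ * Worked example (for illustration): mk_entry(3, 100) rounds 100
+ * bytes up to 13 qwords and packs 3 << ENTRY_BITPOS_DESCRIPTOR |
+ * 13 << ENTRY_BITPOS_QWORDS = 0xc0d; get_entry_id() recovers 3 and
+ * get_entry_len() recovers 13 << 3 = 104 bytes.  With L2_QENTRY_SZ
+ * of 12, desc_mem_sz(NR_QENTRY) is 4 << 12 = 16 KiB of descriptors.
+ */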
+
+/*
+ * FIFO queues, shared with hardware.
+ *
+ * If a queue has empty slots, an entry is added to the queue tail,
+ * and that entry is marked as occupied.
+ * Entries can be dequeued from the head of the list, when the device
+ * has marked the entry as consumed.
+ *
+ * Returns true on successful queue/dequeue, false on failure.
+ */
+static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
+{
+ struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hw->fifo_lock, flags);
+ if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
+ & ENTRY_MASK_O)) {
+ fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
+ (entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
+ fifo_q->tail += 1;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&hw->fifo_lock, flags);
+
+ return ret;
+}
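+
+/*
+ * For illustration of the handshake above: fifo_setup() primes
+ * fifo_q->merge with ENTRY_MASK_O, so every enqueued entry carries
+ * the O ("occupied") bit, while the device marks entries it has
+ * consumed with the C bit that fifo_dequeue() and fifo_check_recv()
+ * test for.
+ */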
+
+static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
+{
+ struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
+ unsigned long flags;
+ int ret = 0;
+ u64 c;
+
+ spin_lock_irqsave(&hw->fifo_lock, flags);
+ c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
+ if (c & ENTRY_MASK_C) {
+ if (entry)
+ *entry = c & ENTRY_MASK_NOSTATE;
+
+ fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
+ (c | ENTRY_MASK) + 1;
+ fifo_q->head += 1;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&hw->fifo_lock, flags);
+
+ return ret;
+}
+
+static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
+{
+ struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
+ unsigned long flags;
+ int ret = 0;
+ u64 c;
+
+ spin_lock_irqsave(&hw->fifo_lock, flags);
+ c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
+ if (c & ENTRY_MASK_C)
+ ret = 1;
+ spin_unlock_irqrestore(&hw->fifo_lock, flags);
+
+ return ret;
+}
+
+static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
+ int dir, int id, int len)
+{
+ char *fifobar;
+ int entry;
+
+ if (dir == SENDQ)
+ fifobar = ccb->ccb_u1.send_fifobar;
+ else
+ fifobar = ccb->ccb_u3.recv_fifobar;
+
+ entry = mk_entry(id, len);
+ return fifo_enqueue(hw, fifobar, entry);
+}
+
+static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
+ int dir, int *id, int *len, void **pkt)
+{
+ char *fifobar, *desc;
+ int entry = 0, pkt_id = 0;
+ int ret;
+
+ if (dir == SENDQ) {
+ fifobar = ccb->ccb_u1.send_fifobar;
+ desc = ccb->ccb_u2.send_desc;
+ } else {
+ fifobar = ccb->ccb_u3.recv_fifobar;
+ desc = ccb->ccb_u4.recv_desc;
+ }
+
+ ret = fifo_dequeue(hw, fifobar, &entry);
+ if (ret) {
+ pkt_id = get_entry_id(entry);
+ if (id)
+ *id = pkt_id;
+ if (len)
+ *len = get_entry_len(entry);
+ if (pkt)
+ *pkt = (void *)(desc + desc_mem_sz(pkt_id));
+ }
+
+ return ret;
+}
+
+static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
+{
+ char *fifobar = ccb->ccb_u3.recv_fifobar;
+
+ return fifo_check_recv(hw, fifobar);
+}
+
+static inline void doorbell_set(struct ccb *ccb)
+{
+ iowrite8(1, ccb->ccb_u5.db_base);
+}
+
+static inline void doorbell_clr(struct ccb *ccb)
+{
+ iowrite8(2, ccb->ccb_u5.db_base);
+}
+
+static inline int ctrl_set(int l2sz, int idxmask, int desclim)
+{
+ int active = 0, go = 1;
+ return l2sz << CTRL_BITPOS_L2SZ |
+ idxmask << CTRL_BITPOS_FIFOINDEXMASK |
+ desclim << CTRL_BITPOS_DESCLIMIT |
+ active << CTRL_BITPOS_A |
+ go << CTRL_BITPOS_G;
+}
+
+static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
+{
+ /* for simplicity, use the same parameters for send and recv ctrls */
+ ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
+ ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
+}
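+
+/*
+ * Worked example (for illustration): with NR_QENTRY = 4 and
+ * L2_QENTRY_SZ = 12, ctrl_set(12, 3, 3) packs
+ * 12 | 3 << 4 | 3 << 18 | 0 << 30 | 1 << 31 = 0x800c003c,
+ * i.e. the G ("go") bit set and the A ("active") bit clear.
+ */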
+
+static inline int fifo_sz(int nr_entry)
+{
+ /* size of a fifo is determined by the number of entries it contains */
+ return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
+}
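+
+/*
+ * Worked example (for illustration), from the struct fifo layout in
+ * hpilo.h: FIFOHANDLESIZE covers the three cache-aligned header
+ * blocks, 3 * ILO_CACHE_SZ = 384 bytes, so fifo_sz(NR_QENTRY) works
+ * out to 4 * 8 + 384 = 416 bytes per queue.
+ */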
+
+static void fifo_setup(void *base_addr, int nr_entry)
+{
+ struct fifo *fifo_q = base_addr;
+ int i;
+
+ /* set up an empty fifo */
+ fifo_q->head = 0;
+ fifo_q->tail = 0;
+ fifo_q->reset = 0;
+ fifo_q->nrents = nr_entry;
+ fifo_q->imask = nr_entry - 1;
+ fifo_q->merge = ENTRY_MASK_O;
+
+ for (i = 0; i < nr_entry; i++)
+ fifo_q->fifobar[i] = 0;
+}
+
+static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
+{
+ struct ccb *driver_ccb = &data->driver_ccb;
+ struct ccb __iomem *device_ccb = data->mapped_ccb;
+ int retries;
+
+ /* complicated dance to tell the hw we are stopping */
+ doorbell_clr(driver_ccb);
+ iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
+ &device_ccb->send_ctrl);
+ iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
+ &device_ccb->recv_ctrl);
+
+ /* give iLO some time to process stop request */
+ for (retries = MAX_WAIT; retries > 0; retries--) {
+ doorbell_set(driver_ccb);
+ udelay(WAIT_TIME);
+		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A)) &&
+		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
+ break;
+ }
+ if (retries == 0)
+ dev_err(&pdev->dev, "Closing, but controller still active\n");
+
+ /* clear the hw ccb */
+ memset_io(device_ccb, 0, sizeof(struct ccb));
+
+ /* free resources used to back send/recv queues */
+ pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
+}
+
+static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
+{
+ char *dma_va;
+ dma_addr_t dma_pa;
+ struct ccb *driver_ccb, *ilo_ccb;
+
+ driver_ccb = &data->driver_ccb;
+ ilo_ccb = &data->ilo_ccb;
+
+ data->dma_size = 2 * fifo_sz(NR_QENTRY) +
+ 2 * desc_mem_sz(NR_QENTRY) +
+ ILO_START_ALIGN + ILO_CACHE_SZ;
+
+ data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
+ &data->dma_pa);
+ if (!data->dma_va)
+ return -ENOMEM;
+
+ dma_va = (char *)data->dma_va;
+ dma_pa = data->dma_pa;
+
+ memset(dma_va, 0, data->dma_size);
+
+ dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
+ dma_pa = roundup(dma_pa, ILO_START_ALIGN);
+
+ /*
+ * Create two ccb's, one with virt addrs, one with phys addrs.
+ * Copy the phys addr ccb to device shared mem.
+ */
+ ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
+ ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);
+
+ fifo_setup(dma_va, NR_QENTRY);
+ driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
+ ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
+ dma_va += fifo_sz(NR_QENTRY);
+ dma_pa += fifo_sz(NR_QENTRY);
+
+ dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
+ dma_pa = roundup(dma_pa, ILO_CACHE_SZ);
+
+ fifo_setup(dma_va, NR_QENTRY);
+ driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
+ ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
+ dma_va += fifo_sz(NR_QENTRY);
+ dma_pa += fifo_sz(NR_QENTRY);
+
+ driver_ccb->ccb_u2.send_desc = dma_va;
+ ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
+ dma_pa += desc_mem_sz(NR_QENTRY);
+ dma_va += desc_mem_sz(NR_QENTRY);
+
+ driver_ccb->ccb_u4.recv_desc = dma_va;
+ ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;
+
+ driver_ccb->channel = slot;
+ ilo_ccb->channel = slot;
+
+ driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
+ ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */
+
+ return 0;
+}
+
+static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
+{
+ int pkt_id, pkt_sz;
+ struct ccb *driver_ccb = &data->driver_ccb;
+
+ /* copy the ccb with physical addrs to device memory */
+ data->mapped_ccb = (struct ccb __iomem *)
+ (hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
+ memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));
+
+ /* put packets on the send and receive queues */
+ pkt_sz = 0;
+ for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
+ ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
+ doorbell_set(driver_ccb);
+ }
+
+ pkt_sz = desc_mem_sz(1);
+ for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
+ ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);
+
+ /* the ccb is ready to use */
+ doorbell_clr(driver_ccb);
+}
+
+static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
+{
+ int pkt_id, i;
+ struct ccb *driver_ccb = &data->driver_ccb;
+
+ /* make sure iLO is really handling requests */
+ for (i = MAX_WAIT; i > 0; i--) {
+ if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
+ break;
+ udelay(WAIT_TIME);
+ }
+
+ if (i == 0) {
+ dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
+ return -EBUSY;
+ }
+
+ ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
+ doorbell_set(driver_ccb);
+ return 0;
+}
+
+static inline int is_channel_reset(struct ccb *ccb)
+{
+ /* check for this particular channel needing a reset */
+ return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
+}
+
+static inline void set_channel_reset(struct ccb *ccb)
+{
+ /* set a flag indicating this channel needs a reset */
+ FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
+}
+
+static inline int get_device_outbound(struct ilo_hwinfo *hw)
+{
+ return ioread32(&hw->mmio_vaddr[DB_OUT]);
+}
+
+static inline int is_db_reset(int db_out)
+{
+ return db_out & (1 << DB_RESET);
+}
+
+static inline int is_device_reset(struct ilo_hwinfo *hw)
+{
+ /* check for global reset condition */
+ return is_db_reset(get_device_outbound(hw));
+}
+
+static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
+{
+ iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
+}
+
+static inline void clear_device(struct ilo_hwinfo *hw)
+{
+ /* clear the device (reset bits, pending channel entries) */
+ clear_pending_db(hw, -1);
+}
+
+static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
+{
+ iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
+}
+
+static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
+{
+ iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
+ &hw->mmio_vaddr[DB_IRQ]);
+}
+
+static void ilo_set_reset(struct ilo_hwinfo *hw)
+{
+ int slot;
+
+ /*
+ * Mapped memory is zeroed on ilo reset, so set a per ccb flag
+ * to indicate that this ccb needs to be closed and reopened.
+ */
+ for (slot = 0; slot < MAX_CCB; slot++) {
+ if (!hw->ccb_alloc[slot])
+ continue;
+ set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
+ }
+}
+
+static ssize_t ilo_read(struct file *fp, char __user *buf,
+ size_t len, loff_t *off)
+{
+ int err, found, cnt, pkt_id, pkt_len;
+ struct ccb_data *data = fp->private_data;
+ struct ccb *driver_ccb = &data->driver_ccb;
+ struct ilo_hwinfo *hw = data->ilo_hw;
+ void *pkt;
+
+ if (is_channel_reset(driver_ccb)) {
+ /*
+ * If the device has been reset, applications
+ * need to close and reopen all ccbs.
+ */
+ return -ENODEV;
+ }
+
+ /*
+ * This function is to be called when data is expected
+ * in the channel, and will return an error if no packet is found
+ * during the loop below. The sleep/retry logic is to allow
+ * applications to call read() immediately post write(),
+ * and give iLO some time to process the sent packet.
+ */
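+	/*
+	 * For illustration: the loop below polls up to 20 times with a
+	 * 100 ms sleep between tries, so a read gives up after roughly
+	 * two seconds and returns -EAGAIN.
+	 */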
+ cnt = 20;
+ do {
+ /* look for a received packet */
+ found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
+ &pkt_len, &pkt);
+ if (found)
+ break;
+ cnt--;
+ msleep(100);
+ } while (!found && cnt);
+
+ if (!found)
+ return -EAGAIN;
+
+ /* only copy the length of the received packet */
+ if (pkt_len < len)
+ len = pkt_len;
+
+ err = copy_to_user(buf, pkt, len);
+
+ /* return the received packet to the queue */
+ ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));
+
+ return err ? -EFAULT : len;
+}
+
+static ssize_t ilo_write(struct file *fp, const char __user *buf,
+ size_t len, loff_t *off)
+{
+ int err, pkt_id, pkt_len;
+ struct ccb_data *data = fp->private_data;
+ struct ccb *driver_ccb = &data->driver_ccb;
+ struct ilo_hwinfo *hw = data->ilo_hw;
+ void *pkt;
+
+ if (is_channel_reset(driver_ccb))
+ return -ENODEV;
+
+ /* get a packet to send the user command */
+ if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
+ return -EBUSY;
+
+ /* limit the length to the length of the packet */
+ if (pkt_len < len)
+ len = pkt_len;
+
+ /* on failure, set the len to 0 to return empty packet to the device */
+ err = copy_from_user(pkt, buf, len);
+ if (err)
+ len = 0;
+
+ /* send the packet */
+ ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
+ doorbell_set(driver_ccb);
+
+ return err ? -EFAULT : len;
+}
+
+static unsigned int ilo_poll(struct file *fp, poll_table *wait)
+{
+ struct ccb_data *data = fp->private_data;
+ struct ccb *driver_ccb = &data->driver_ccb;
+
+ poll_wait(fp, &data->ccb_waitq, wait);
+
+ if (is_channel_reset(driver_ccb))
+ return POLLERR;
+ else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int ilo_close(struct inode *ip, struct file *fp)
+{
+ int slot;
+ struct ccb_data *data;
+ struct ilo_hwinfo *hw;
+ unsigned long flags;
+
+ slot = iminor(ip) % MAX_CCB;
+ hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
+
+ spin_lock(&hw->open_lock);
+
+ if (hw->ccb_alloc[slot]->ccb_cnt == 1) {
+
+ data = fp->private_data;
+
+ spin_lock_irqsave(&hw->alloc_lock, flags);
+ hw->ccb_alloc[slot] = NULL;
+ spin_unlock_irqrestore(&hw->alloc_lock, flags);
+
+ ilo_ccb_close(hw->ilo_dev, data);
+
+ kfree(data);
+ } else
+ hw->ccb_alloc[slot]->ccb_cnt--;
+
+ spin_unlock(&hw->open_lock);
+
+ return 0;
+}
+
+static int ilo_open(struct inode *ip, struct file *fp)
+{
+ int slot, error;
+ struct ccb_data *data;
+ struct ilo_hwinfo *hw;
+ unsigned long flags;
+
+ slot = iminor(ip) % MAX_CCB;
+ hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
+
+ /* new ccb allocation */
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock(&hw->open_lock);
+
+ /* each fd private_data holds sw/hw view of ccb */
+ if (hw->ccb_alloc[slot] == NULL) {
+ /* create a channel control block for this minor */
+ error = ilo_ccb_setup(hw, data, slot);
+ if (error) {
+ kfree(data);
+ goto out;
+ }
+
+ data->ccb_cnt = 1;
+ data->ccb_excl = fp->f_flags & O_EXCL;
+ data->ilo_hw = hw;
+ init_waitqueue_head(&data->ccb_waitq);
+
+ /* write the ccb to hw */
+ spin_lock_irqsave(&hw->alloc_lock, flags);
+ ilo_ccb_open(hw, data, slot);
+ hw->ccb_alloc[slot] = data;
+ spin_unlock_irqrestore(&hw->alloc_lock, flags);
+
+ /* make sure the channel is functional */
+ error = ilo_ccb_verify(hw, data);
+ if (error) {
+
+ spin_lock_irqsave(&hw->alloc_lock, flags);
+ hw->ccb_alloc[slot] = NULL;
+ spin_unlock_irqrestore(&hw->alloc_lock, flags);
+
+ ilo_ccb_close(hw->ilo_dev, data);
+
+ kfree(data);
+ goto out;
+ }
+
+ } else {
+ kfree(data);
+ if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
+ /*
+ * The channel exists, and either this open
+ * or a previous open of this channel wants
+ * exclusive access.
+ */
+ error = -EBUSY;
+ } else {
+ hw->ccb_alloc[slot]->ccb_cnt++;
+ error = 0;
+ }
+ }
+out:
+ spin_unlock(&hw->open_lock);
+
+ if (!error)
+ fp->private_data = hw->ccb_alloc[slot];
+
+ return error;
+}
+
+static const struct file_operations ilo_fops = {
+ .owner = THIS_MODULE,
+ .read = ilo_read,
+ .write = ilo_write,
+ .poll = ilo_poll,
+ .open = ilo_open,
+ .release = ilo_close,
+ .llseek = noop_llseek,
+};
+
+static irqreturn_t ilo_isr(int irq, void *data)
+{
+ struct ilo_hwinfo *hw = data;
+ int pending, i;
+
+ spin_lock(&hw->alloc_lock);
+
+ /* check for ccbs which have data */
+ pending = get_device_outbound(hw);
+ if (!pending) {
+ spin_unlock(&hw->alloc_lock);
+ return IRQ_NONE;
+ }
+
+ if (is_db_reset(pending)) {
+ /* wake up all ccbs if the device was reset */
+ pending = -1;
+ ilo_set_reset(hw);
+ }
+
+ for (i = 0; i < MAX_CCB; i++) {
+ if (!hw->ccb_alloc[i])
+ continue;
+ if (pending & (1 << i))
+ wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
+ }
+
+ /* clear the device of the channels that have been handled */
+ clear_pending_db(hw, pending);
+
+ spin_unlock(&hw->alloc_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
+{
+ pci_iounmap(pdev, hw->db_vaddr);
+ pci_iounmap(pdev, hw->ram_vaddr);
+ pci_iounmap(pdev, hw->mmio_vaddr);
+}
+
+static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
+{
+ int error = -ENOMEM;
+
+ /* map the memory mapped i/o registers */
+ hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
+ if (hw->mmio_vaddr == NULL) {
+ dev_err(&pdev->dev, "Error mapping mmio\n");
+ goto out;
+ }
+
+ /* map the adapter shared memory region */
+ hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ);
+ if (hw->ram_vaddr == NULL) {
+ dev_err(&pdev->dev, "Error mapping shared mem\n");
+ goto mmio_free;
+ }
+
+ /* map the doorbell aperture */
+ hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE);
+ if (hw->db_vaddr == NULL) {
+ dev_err(&pdev->dev, "Error mapping doorbell\n");
+ goto ram_free;
+ }
+
+ return 0;
+ram_free:
+ pci_iounmap(pdev, hw->ram_vaddr);
+mmio_free:
+ pci_iounmap(pdev, hw->mmio_vaddr);
+out:
+ return error;
+}
+
+static void ilo_remove(struct pci_dev *pdev)
+{
+ int i, minor;
+ struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);
+
+ clear_device(ilo_hw);
+
+ minor = MINOR(ilo_hw->cdev.dev);
+ for (i = minor; i < minor + MAX_CCB; i++)
+ device_destroy(ilo_class, MKDEV(ilo_major, i));
+
+ cdev_del(&ilo_hw->cdev);
+ ilo_disable_interrupts(ilo_hw);
+ free_irq(pdev->irq, ilo_hw);
+ ilo_unmap_device(pdev, ilo_hw);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(ilo_hw);
+ ilo_hwdev[(minor / MAX_CCB)] = 0;
+}
+
+static int __devinit ilo_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int devnum, minor, start, error;
+ struct ilo_hwinfo *ilo_hw;
+
+ /* find a free range for device files */
+ for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
+ if (ilo_hwdev[devnum] == 0) {
+ ilo_hwdev[devnum] = 1;
+ break;
+ }
+ }
+
+ if (devnum == MAX_ILO_DEV) {
+ dev_err(&pdev->dev, "Error finding free device\n");
+ return -ENODEV;
+ }
+
+ /* track global allocations for this device */
+ error = -ENOMEM;
+ ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
+ if (!ilo_hw)
+ goto out;
+
+ ilo_hw->ilo_dev = pdev;
+ spin_lock_init(&ilo_hw->alloc_lock);
+ spin_lock_init(&ilo_hw->fifo_lock);
+ spin_lock_init(&ilo_hw->open_lock);
+
+ error = pci_enable_device(pdev);
+ if (error)
+ goto free;
+
+ pci_set_master(pdev);
+
+ error = pci_request_regions(pdev, ILO_NAME);
+ if (error)
+ goto disable;
+
+ error = ilo_map_device(pdev, ilo_hw);
+ if (error)
+ goto free_regions;
+
+ pci_set_drvdata(pdev, ilo_hw);
+ clear_device(ilo_hw);
+
+ error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
+ if (error)
+ goto unmap;
+
+ ilo_enable_interrupts(ilo_hw);
+
+ cdev_init(&ilo_hw->cdev, &ilo_fops);
+ ilo_hw->cdev.owner = THIS_MODULE;
+ start = devnum * MAX_CCB;
+ error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
+ if (error) {
+ dev_err(&pdev->dev, "Could not add cdev\n");
+ goto remove_isr;
+ }
+
+ for (minor = 0 ; minor < MAX_CCB; minor++) {
+ struct device *dev;
+ dev = device_create(ilo_class, &pdev->dev,
+ MKDEV(ilo_major, minor), NULL,
+ "hpilo!d%dccb%d", devnum, minor);
+ if (IS_ERR(dev))
+ dev_err(&pdev->dev, "Could not create files\n");
+ }
+
+ return 0;
+remove_isr:
+ ilo_disable_interrupts(ilo_hw);
+ free_irq(pdev->irq, ilo_hw);
+unmap:
+ ilo_unmap_device(pdev, ilo_hw);
+free_regions:
+ pci_release_regions(pdev);
+disable:
+ pci_disable_device(pdev);
+free:
+ kfree(ilo_hw);
+out:
+ ilo_hwdev[devnum] = 0;
+ return error;
+}
+
+static struct pci_device_id ilo_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ilo_devices);
+
+static struct pci_driver ilo_driver = {
+ .name = ILO_NAME,
+ .id_table = ilo_devices,
+ .probe = ilo_probe,
+ .remove = __devexit_p(ilo_remove),
+};
+
+static int __init ilo_init(void)
+{
+ int error;
+ dev_t dev;
+
+ ilo_class = class_create(THIS_MODULE, "iLO");
+ if (IS_ERR(ilo_class)) {
+ error = PTR_ERR(ilo_class);
+ goto out;
+ }
+
+ error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
+ if (error)
+ goto class_destroy;
+
+ ilo_major = MAJOR(dev);
+
+ error = pci_register_driver(&ilo_driver);
+ if (error)
+ goto chr_remove;
+
+ return 0;
+chr_remove:
+ unregister_chrdev_region(dev, MAX_OPEN);
+class_destroy:
+ class_destroy(ilo_class);
+out:
+ return error;
+}
+
+static void __exit ilo_exit(void)
+{
+ pci_unregister_driver(&ilo_driver);
+ unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
+ class_destroy(ilo_class);
+}
+
+MODULE_VERSION("1.2");
+MODULE_ALIAS(ILO_NAME);
+MODULE_DESCRIPTION(ILO_NAME);
+MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
+MODULE_LICENSE("GPL v2");
+
+module_init(ilo_init);
+module_exit(ilo_exit);
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
new file mode 100644
index 00000000..54e43adb
--- /dev/null
+++ b/drivers/misc/hpilo.h
@@ -0,0 +1,212 @@
+/*
+ * linux/drivers/char/hpilo.h
+ *
+ * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
+ * David Altobelli <david.altobelli@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __HPILO_H
+#define __HPILO_H
+
+#define ILO_NAME "hpilo"
+
+/* max number of open channel control blocks per device, hw limited to 32 */
+#define MAX_CCB 8
+/* max number of supported devices */
+#define MAX_ILO_DEV 1
+/* max number of files */
+#define MAX_OPEN (MAX_CCB * MAX_ILO_DEV)
+/* total wait time in usec */
+#define MAX_WAIT_TIME 10000
+/* per spin wait time in usec */
+#define WAIT_TIME 10
+/* spin counter for open/close delay */
+#define MAX_WAIT (MAX_WAIT_TIME / WAIT_TIME)
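+/*
+ * For illustration: with the values above, the open/close paths spin
+ * at most MAX_WAIT = 10000 / 10 = 1000 times at WAIT_TIME (10 usec)
+ * per spin, a total budget of about 10 ms.
+ */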
+
+/*
+ * Per device, used to track global memory allocations.
+ */
+struct ilo_hwinfo {
+ /* mmio registers on device */
+ char __iomem *mmio_vaddr;
+
+ /* doorbell registers on device */
+ char __iomem *db_vaddr;
+
+ /* shared memory on device used for channel control blocks */
+ char __iomem *ram_vaddr;
+
+ /* files corresponding to this device */
+ struct ccb_data *ccb_alloc[MAX_CCB];
+
+ struct pci_dev *ilo_dev;
+
+ /*
+ * open_lock serializes ccb_cnt during open and close
+ * [ irq disabled ]
+ * -> alloc_lock used when adding/removing/searching ccb_alloc,
+ * which represents all ccbs open on the device
+ * --> fifo_lock controls access to fifo queues shared with hw
+ *
+ * Locks must be taken in this order, but open_lock and alloc_lock
+ * are optional, they do not need to be held in order to take a
+ * lower level lock.
+ */
+ spinlock_t open_lock;
+ spinlock_t alloc_lock;
+ spinlock_t fifo_lock;
+
+ struct cdev cdev;
+};
+
+/* offset from mmio_vaddr for enabling doorbell interrupts */
+#define DB_IRQ 0xB2
+/* offset from mmio_vaddr for outbound communications */
+#define DB_OUT 0xD4
+/* DB_OUT reset bit */
+#define DB_RESET 26
+
+/*
+ * Channel control block. Used to manage hardware queues.
+ * The format must match hw's version. The hw ccb is 128 bytes,
+ * but the context area shouldn't be touched by the driver.
+ */
+#define ILOSW_CCB_SZ 64
+#define ILOHW_CCB_SZ 128
+struct ccb {
+ union {
+ char *send_fifobar;
+ u64 send_fifobar_pa;
+ } ccb_u1;
+ union {
+ char *send_desc;
+ u64 send_desc_pa;
+ } ccb_u2;
+ u64 send_ctrl;
+
+ union {
+ char *recv_fifobar;
+ u64 recv_fifobar_pa;
+ } ccb_u3;
+ union {
+ char *recv_desc;
+ u64 recv_desc_pa;
+ } ccb_u4;
+ u64 recv_ctrl;
+
+ union {
+ char __iomem *db_base;
+ u64 padding5;
+ } ccb_u5;
+
+ u64 channel;
+
+ /* unused context area (64 bytes) */
+};
+
+/* ccb queue parameters */
+#define SENDQ 1
+#define RECVQ 2
+#define NR_QENTRY 4
+#define L2_QENTRY_SZ 12
+
+/* ccb ctrl bitfields */
+#define CTRL_BITPOS_L2SZ 0
+#define CTRL_BITPOS_FIFOINDEXMASK 4
+#define CTRL_BITPOS_DESCLIMIT 18
+#define CTRL_BITPOS_A 30
+#define CTRL_BITPOS_G 31
+
+/* ccb doorbell macros */
+#define L2_DB_SIZE 14
+#define ONE_DB_SIZE (1 << L2_DB_SIZE)
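+/*
+ * For illustration: each ccb owns a 1 << 14 = 16 KiB doorbell
+ * aperture, so slot n rings its doorbell at
+ * db_vaddr + (n << L2_DB_SIZE) (see ilo_ccb_setup()).
+ */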
+
+/*
+ * Per fd structure used to track the ccb allocated to that dev file.
+ */
+struct ccb_data {
+ /* software version of ccb, using virtual addrs */
+ struct ccb driver_ccb;
+
+ /* hardware version of ccb, using physical addrs */
+ struct ccb ilo_ccb;
+
+ /* hardware ccb is written to this shared mapped device memory */
+ struct ccb __iomem *mapped_ccb;
+
+ /* dma'able memory used for send/recv queues */
+ void *dma_va;
+ dma_addr_t dma_pa;
+ size_t dma_size;
+
+ /* pointer to hardware device info */
+ struct ilo_hwinfo *ilo_hw;
+
+ /* queue for this ccb to wait for recv data */
+ wait_queue_head_t ccb_waitq;
+
+ /* usage count, to allow for shared ccb's */
+ int ccb_cnt;
+
+ /* open wanted exclusive access to this ccb */
+ int ccb_excl;
+};
+
+/*
+ * FIFO queue structure, shared with hw.
+ */
+#define ILO_START_ALIGN 4096
+#define ILO_CACHE_SZ 128
+struct fifo {
+ u64 nrents; /* user requested number of fifo entries */
+ u64 imask; /* mask to extract valid fifo index */
+ u64 merge; /* O/C bits to merge in during enqueue operation */
+ u64 reset; /* set to non-zero when the target device resets */
+ u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
+
+ u64 head;
+ u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
+
+ u64 tail;
+ u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
+
+ u64 fifobar[1];
+};
+
+/* convert between struct fifo, and the fifobar, which is saved in the ccb */
+#define FIFOHANDLESIZE (sizeof(struct fifo) - sizeof(u64))
+#define FIFOBARTOHANDLE(_fifo) \
+ ((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE))
+
+/* the number of qwords to consume from the entry descriptor */
+#define ENTRY_BITPOS_QWORDS 0
+/* descriptor index number (within a specified queue) */
+#define ENTRY_BITPOS_DESCRIPTOR 10
+/* state bit, fifo entry consumed by consumer */
+#define ENTRY_BITPOS_C 22
+/* state bit, fifo entry is occupied */
+#define ENTRY_BITPOS_O 23
+
+#define ENTRY_BITS_QWORDS 10
+#define ENTRY_BITS_DESCRIPTOR 12
+#define ENTRY_BITS_C 1
+#define ENTRY_BITS_O 1
+#define ENTRY_BITS_TOTAL \
+ (ENTRY_BITS_C + ENTRY_BITS_O + \
+ ENTRY_BITS_QWORDS + ENTRY_BITS_DESCRIPTOR)
+
+/* extract various entry fields */
+#define ENTRY_MASK ((1 << ENTRY_BITS_TOTAL) - 1)
+#define ENTRY_MASK_C (((1 << ENTRY_BITS_C) - 1) << ENTRY_BITPOS_C)
+#define ENTRY_MASK_O (((1 << ENTRY_BITS_O) - 1) << ENTRY_BITPOS_O)
+#define ENTRY_MASK_QWORDS \
+ (((1 << ENTRY_BITS_QWORDS) - 1) << ENTRY_BITPOS_QWORDS)
+#define ENTRY_MASK_DESCRIPTOR \
+ (((1 << ENTRY_BITS_DESCRIPTOR) - 1) << ENTRY_BITPOS_DESCRIPTOR)
+
+#define ENTRY_MASK_NOSTATE (ENTRY_MASK >> (ENTRY_BITS_C + ENTRY_BITS_O))
+
+#endif /* __HPILO_H */
diff --git a/drivers/misc/ibmasm/Makefile b/drivers/misc/ibmasm/Makefile
new file mode 100644
index 00000000..9e63ade5
--- /dev/null
+++ b/drivers/misc/ibmasm/Makefile
@@ -0,0 +1,15 @@
+
+obj-$(CONFIG_IBM_ASM) := ibmasm.o
+
+ibmasm-y := module.o \
+ ibmasmfs.o \
+ event.o \
+ command.o \
+ remote.o \
+ heartbeat.o \
+ r_heartbeat.o \
+ dot_command.o \
+ lowlevel.o
+
+ibmasm-$(CONFIG_SERIAL_8250) += uart.o
+
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
new file mode 100644
index 00000000..7d56f45d
--- /dev/null
+++ b/drivers/misc/ibmasm/command.c
@@ -0,0 +1,187 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "ibmasm.h"
+#include "lowlevel.h"
+
+static void exec_next_command(struct service_processor *sp);
+
+static atomic_t command_count = ATOMIC_INIT(0);
+
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size)
+{
+ struct command *cmd;
+
+ if (buffer_size > IBMASM_CMD_MAX_BUFFER_SIZE)
+ return NULL;
+
+ cmd = kzalloc(sizeof(struct command), GFP_KERNEL);
+ if (cmd == NULL)
+ return NULL;
+
+ cmd->buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (cmd->buffer == NULL) {
+ kfree(cmd);
+ return NULL;
+ }
+ cmd->buffer_size = buffer_size;
+
+ kref_init(&cmd->kref);
+ cmd->lock = &sp->lock;
+
+ cmd->status = IBMASM_CMD_PENDING;
+ init_waitqueue_head(&cmd->wait);
+ INIT_LIST_HEAD(&cmd->queue_node);
+
+ atomic_inc(&command_count);
+ dbg("command count: %d\n", atomic_read(&command_count));
+
+ return cmd;
+}
+
+void ibmasm_free_command(struct kref *kref)
+{
+ struct command *cmd = to_command(kref);
+
+ list_del(&cmd->queue_node);
+ atomic_dec(&command_count);
+ dbg("command count: %d\n", atomic_read(&command_count));
+ kfree(cmd->buffer);
+ kfree(cmd);
+}
+
+static void enqueue_command(struct service_processor *sp, struct command *cmd)
+{
+ list_add_tail(&cmd->queue_node, &sp->command_queue);
+}
+
+static struct command *dequeue_command(struct service_processor *sp)
+{
+ struct command *cmd;
+ struct list_head *next;
+
+ if (list_empty(&sp->command_queue))
+ return NULL;
+
+ next = sp->command_queue.next;
+ list_del_init(next);
+ cmd = list_entry(next, struct command, queue_node);
+
+ return cmd;
+}
+
+static inline void do_exec_command(struct service_processor *sp)
+{
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+ if (ibmasm_send_i2o_message(sp)) {
+ sp->current_command->status = IBMASM_CMD_FAILED;
+ wake_up(&sp->current_command->wait);
+ command_put(sp->current_command);
+ exec_next_command(sp);
+ }
+}
+
+/**
+ * exec_command
+ * send a command to a service processor
+ * Commands are executed sequentially. One command (sp->current_command)
+ * is sent to the service processor. Once the interrupt handler gets a
+ * message of type command_response, the message is copied into
+ * the current command's buffer.
+ */
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
+{
+ unsigned long flags;
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ if (!sp->current_command) {
+ sp->current_command = cmd;
+ command_get(sp->current_command);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ do_exec_command(sp);
+ } else {
+ enqueue_command(sp, cmd);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+}
+
+static void exec_next_command(struct service_processor *sp)
+{
+ unsigned long flags;
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+ spin_lock_irqsave(&sp->lock, flags);
+ sp->current_command = dequeue_command(sp);
+ if (sp->current_command) {
+ command_get(sp->current_command);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ do_exec_command(sp);
+ } else {
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+}
+
+/**
+ * Sleep until a command has failed or a response has been received
+ * and the command status has been updated by the interrupt handler
+ * (see ibmasm_receive_command_response()).
+ */
+void ibmasm_wait_for_response(struct command *cmd, int timeout)
+{
+ wait_event_interruptible_timeout(cmd->wait,
+ cmd->status == IBMASM_CMD_COMPLETE ||
+ cmd->status == IBMASM_CMD_FAILED,
+ timeout * HZ);
+}
+
+/**
+ * receive_command_response
+ * called by the interrupt handler when a dot command of type command_response
+ * was received.
+ */
+void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size)
+{
+ struct command *cmd = sp->current_command;
+
+ if (!sp->current_command)
+ return;
+
+ memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
+ cmd->status = IBMASM_CMD_COMPLETE;
+ wake_up(&sp->current_command->wait);
+ command_put(sp->current_command);
+ exec_next_command(sp);
+}
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
new file mode 100644
index 00000000..d7b2ca35
--- /dev/null
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -0,0 +1,152 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include "ibmasm.h"
+#include "dot_command.h"
+
+/**
+ * Dispatch an incoming message to the specific handler for the message.
+ * Called from interrupt context.
+ */
+void ibmasm_receive_message(struct service_processor *sp, void *message, int message_size)
+{
+ u32 size;
+ struct dot_command_header *header = (struct dot_command_header *)message;
+
+ if (message_size == 0)
+ return;
+
+ size = get_dot_command_size(message);
+ if (size == 0)
+ return;
+
+ if (size > message_size)
+ size = message_size;
+
+ switch (header->type) {
+ case sp_event:
+ ibmasm_receive_event(sp, message, size);
+ break;
+ case sp_command_response:
+ ibmasm_receive_command_response(sp, message, size);
+ break;
+ case sp_heartbeat:
+ ibmasm_receive_heartbeat(sp, message, size);
+ break;
+ default:
+ dev_err(sp->dev, "Received unknown message from service processor\n");
+ }
+}
+
+
+#define INIT_BUFFER_SIZE 32
+
+
+/**
+ * send the 4.3.5.10 dot command (driver VPD) to the service processor
+ */
+int ibmasm_send_driver_vpd(struct service_processor *sp)
+{
+ struct command *command;
+ struct dot_command_header *header;
+ u8 *vpd_command;
+ u8 *vpd_data;
+ int result = 0;
+
+ command = ibmasm_new_command(sp, INIT_BUFFER_SIZE);
+ if (command == NULL)
+ return -ENOMEM;
+
+ header = (struct dot_command_header *)command->buffer;
+ header->type = sp_write;
+ header->command_size = 4;
+ header->data_size = 16;
+ header->status = 0;
+ header->reserved = 0;
+
+ vpd_command = command->buffer + sizeof(struct dot_command_header);
+ vpd_command[0] = 0x4;
+ vpd_command[1] = 0x3;
+ vpd_command[2] = 0x5;
+ vpd_command[3] = 0xa;
+
+ vpd_data = vpd_command + header->command_size;
+ vpd_data[0] = 0;
+ strcat(vpd_data, IBMASM_DRIVER_VPD);
+ vpd_data[10] = 0;
+ vpd_data[15] = 0;
+
+ ibmasm_exec_command(sp, command);
+ ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
+
+ if (command->status != IBMASM_CMD_COMPLETE)
+ result = -ENODEV;
+
+ command_put(command);
+
+ return result;
+}
+
+struct os_state_command {
+ struct dot_command_header header;
+ unsigned char command[3];
+ unsigned char data;
+};
+
+/**
+ * send the 4.3.6 dot command (os state) to the service processor
+ * During driver init this function is called with os state "up".
+ * This causes the service processor to start sending heartbeats to
+ * the driver.
+ * During driver exit the function is called with os state "down",
+ * causing the service processor to stop the heartbeats.
+ */
+int ibmasm_send_os_state(struct service_processor *sp, int os_state)
+{
+ struct command *cmd;
+ struct os_state_command *os_state_cmd;
+ int result = 0;
+
+ cmd = ibmasm_new_command(sp, sizeof(struct os_state_command));
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ os_state_cmd = (struct os_state_command *)cmd->buffer;
+ os_state_cmd->header.type = sp_write;
+ os_state_cmd->header.command_size = 3;
+ os_state_cmd->header.data_size = 1;
+ os_state_cmd->header.status = 0;
+ os_state_cmd->command[0] = 4;
+ os_state_cmd->command[1] = 3;
+ os_state_cmd->command[2] = 6;
+ os_state_cmd->data = os_state;
+
+ ibmasm_exec_command(sp, cmd);
+ ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
+
+ if (cmd->status != IBMASM_CMD_COMPLETE)
+ result = -ENODEV;
+
+ command_put(cmd);
+ return result;
+}
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
new file mode 100644
index 00000000..fc9fc9d4
--- /dev/null
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -0,0 +1,78 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#ifndef __DOT_COMMAND_H__
+#define __DOT_COMMAND_H__
+
+/*
+ * dot commands are the protocol used to communicate with the service
+ * processor.
+ * They consist of header, a command of variable length and data of
+ * variable length.
+ */
+
+/* dot command types */
+#define sp_write 0
+#define sp_write_next 1
+#define sp_read 2
+#define sp_read_next 3
+#define sp_command_response 4
+#define sp_event 5
+#define sp_heartbeat 6
+
+#pragma pack(1)
+struct dot_command_header {
+ u8 type;
+ u8 command_size;
+ u16 data_size;
+ u8 status;
+ u8 reserved;
+};
+#pragma pack()
+
+static inline size_t get_dot_command_size(void *buffer)
+{
+ struct dot_command_header *cmd = (struct dot_command_header *)buffer;
+ return sizeof(struct dot_command_header) + cmd->command_size + cmd->data_size;
+}
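+
+/*
+ * Worked example (for illustration): the driver VPD dot command built
+ * in dot_command.c has command_size = 4 and data_size = 16, so with
+ * the 6 byte header get_dot_command_size() reports 6 + 4 + 16 = 26.
+ */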
+
+static inline unsigned int get_dot_command_timeout(void *buffer)
+{
+ struct dot_command_header *header = (struct dot_command_header *)buffer;
+ unsigned char *cmd = buffer + sizeof(struct dot_command_header);
+
+ /* dot commands 6.3.1, 7.1 and 8.x need a longer timeout */
+
+ if (header->command_size == 3) {
+ if ((cmd[0] == 6) && (cmd[1] == 3) && (cmd[2] == 1))
+ return IBMASM_CMD_TIMEOUT_EXTRA;
+ } else if (header->command_size == 2) {
+ if ((cmd[0] == 7) && (cmd[1] == 1))
+ return IBMASM_CMD_TIMEOUT_EXTRA;
+ if (cmd[0] == 8)
+ return IBMASM_CMD_TIMEOUT_EXTRA;
+ }
+ return IBMASM_CMD_TIMEOUT_NORMAL;
+}
+
+#endif /* __DOT_COMMAND_H__ */
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
new file mode 100644
index 00000000..8e540f4e
--- /dev/null
+++ b/drivers/misc/ibmasm/event.c
@@ -0,0 +1,177 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "ibmasm.h"
+#include "lowlevel.h"
+
+/*
+ * ASM service processor event handling routines.
+ *
+ * Events are signalled to the device drivers through interrupts.
+ * They have the format of dot commands, with the type field set to
+ * sp_event.
+ * The driver does not interpret the events, it simply stores them in a
+ * circular buffer.
+ */
+
+static void wake_up_event_readers(struct service_processor *sp)
+{
+ struct event_reader *reader;
+
+ list_for_each_entry(reader, &sp->event_buffer->readers, node)
+ wake_up_interruptible(&reader->wait);
+}
+
+/**
+ * receive_event
+ * Called by the interrupt handler when a dot command of type sp_event is
+ * received.
+ * Store the event in the circular event buffer, wake up any sleeping
+ * event readers.
+ * There is no reader marker in the buffer, therefore readers are
+ * responsible for keeping up with the writer, or they will lose events.
+ */
+void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
+{
+ struct event_buffer *buffer = sp->event_buffer;
+ struct ibmasm_event *event;
+ unsigned long flags;
+
+ data_size = min(data_size, IBMASM_EVENT_MAX_SIZE);
+
+ spin_lock_irqsave(&sp->lock, flags);
+ /* copy the event into the next slot in the circular buffer */
+ event = &buffer->events[buffer->next_index];
+ memcpy_fromio(event->data, data, data_size);
+ event->data_size = data_size;
+ event->serial_number = buffer->next_serial_number;
+
+ /* advance indices in the buffer */
+ buffer->next_index = (buffer->next_index + 1) % IBMASM_NUM_EVENTS;
+ buffer->next_serial_number++;
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ wake_up_event_readers(sp);
+}
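+
+/*
+ * For illustration: with IBMASM_NUM_EVENTS = 10, the 11th event
+ * overwrites slot 0 while next_serial_number keeps counting, so a
+ * reader that falls more than 10 events behind silently loses the
+ * overwritten entries (as the comment above warns).
+ */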
+
+static inline int event_available(struct event_buffer *b, struct event_reader *r)
+{
+ return (r->next_serial_number < b->next_serial_number);
+}
+
+/**
+ * get_next_event
+ * Called by event readers (initiated from user space through the file
+ * system).
+ * Sleeps until a new event is available.
+ */
+int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
+{
+ struct event_buffer *buffer = sp->event_buffer;
+ struct ibmasm_event *event;
+ unsigned int index;
+ unsigned long flags;
+
+ reader->cancelled = 0;
+
+ if (wait_event_interruptible(reader->wait,
+ event_available(buffer, reader) || reader->cancelled))
+ return -ERESTARTSYS;
+
+ if (!event_available(buffer, reader))
+ return 0;
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ index = buffer->next_index;
+ event = &buffer->events[index];
+ while (event->serial_number < reader->next_serial_number) {
+ index = (index + 1) % IBMASM_NUM_EVENTS;
+ event = &buffer->events[index];
+ }
+ memcpy(reader->data, event->data, event->data_size);
+ reader->data_size = event->data_size;
+ reader->next_serial_number = event->serial_number + 1;
+
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ return event->data_size;
+}
+
+void ibmasm_cancel_next_event(struct event_reader *reader)
+{
+ reader->cancelled = 1;
+ wake_up_interruptible(&reader->wait);
+}
+
+void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader)
+{
+ unsigned long flags;
+
+ reader->next_serial_number = sp->event_buffer->next_serial_number;
+ init_waitqueue_head(&reader->wait);
+ spin_lock_irqsave(&sp->lock, flags);
+ list_add(&reader->node, &sp->event_buffer->readers);
+ spin_unlock_irqrestore(&sp->lock, flags);
+}
+
+void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ list_del(&reader->node);
+ spin_unlock_irqrestore(&sp->lock, flags);
+}
+
+int ibmasm_event_buffer_init(struct service_processor *sp)
+{
+ struct event_buffer *buffer;
+ struct ibmasm_event *event;
+ int i;
+
+ buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL);
+ if (!buffer)
+ return 1;
+
+ buffer->next_index = 0;
+ buffer->next_serial_number = 1;
+
+ event = buffer->events;
+	for (i = 0; i < IBMASM_NUM_EVENTS; i++, event++)
+ event->serial_number = 0;
+
+ INIT_LIST_HEAD(&buffer->readers);
+
+ sp->event_buffer = buffer;
+
+ return 0;
+}
+
+void ibmasm_event_buffer_exit(struct service_processor *sp)
+{
+ kfree(sp->event_buffer);
+}
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
new file mode 100644
index 00000000..90746378
--- /dev/null
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -0,0 +1,101 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/notifier.h>
+#include "ibmasm.h"
+#include "dot_command.h"
+#include "lowlevel.h"
+
+static int suspend_heartbeats;
+
+/*
+ * Once the driver indicates to the service processor that it is running
+ * - see send_os_state() - the service processor sends periodic heartbeats
+ * to the driver. The driver must respond to the heartbeats or else the OS
+ * will be rebooted.
+ * In the case of a panic the interrupt handler continues to work and thus
+ * continues to respond to heartbeats, making the service processor believe
+ * the OS is still running and thus preventing a reboot.
+ * To prevent this from happening a callback is added to the
+ * panic_notifier_list.
+ * Before responding to a heartbeat the driver checks if a panic has
+ * happened; if so it suspends heartbeats, causing the service
+ * processor to reboot the OS as expected.
+ */
+static int panic_happened(struct notifier_block *n, unsigned long val, void *v)
+{
+ suspend_heartbeats = 1;
+ return 0;
+}
+
+static struct notifier_block panic_notifier = {
+	.notifier_call = panic_happened,
+	.priority = 1,
+};
+
+void ibmasm_register_panic_notifier(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
+}
+
+void ibmasm_unregister_panic_notifier(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_notifier);
+}
+
+
+int ibmasm_heartbeat_init(struct service_processor *sp)
+{
+ sp->heartbeat = ibmasm_new_command(sp, HEARTBEAT_BUFFER_SIZE);
+ if (sp->heartbeat == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ibmasm_heartbeat_exit(struct service_processor *sp)
+{
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+ ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+ suspend_heartbeats = 1;
+ command_put(sp->heartbeat);
+}
+
+void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size)
+{
+ struct command *cmd = sp->heartbeat;
+ struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+ if (suspend_heartbeats)
+ return;
+
+ /* return the received dot command to sender */
+ cmd->status = IBMASM_CMD_PENDING;
+ size = min(size, cmd->buffer_size);
+ memcpy_fromio(cmd->buffer, message, size);
+ header->type = sp_write;
+ ibmasm_exec_command(sp, cmd);
+}
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
new file mode 100644
index 00000000..2e9566da
--- /dev/null
+++ b/drivers/misc/ibmasm/i2o.h
@@ -0,0 +1,77 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#pragma pack(1)
+struct i2o_header {
+ u8 version;
+ u8 message_flags;
+ u16 message_size;
+ u8 target;
+ u8 initiator_and_target;
+ u8 initiator;
+ u8 function;
+ u32 initiator_context;
+};
+#pragma pack()
+
+#define I2O_HEADER_TEMPLATE \
+ { .version = 0x01, \
+ .message_flags = 0x00, \
+ .function = 0xFF, \
+ .initiator = 0x00, \
+ .initiator_and_target = 0x40, \
+ .target = 0x00, \
+ .initiator_context = 0x0 }
+
+#define I2O_MESSAGE_SIZE 0x1000
+#define I2O_COMMAND_SIZE (I2O_MESSAGE_SIZE - sizeof(struct i2o_header))
+
+#pragma pack(1)
+struct i2o_message {
+ struct i2o_header header;
+ void *data;
+};
+#pragma pack()
+
+static inline unsigned short outgoing_message_size(unsigned int data_size)
+{
+ unsigned int size;
+ unsigned short i2o_size;
+
+ if (data_size > I2O_COMMAND_SIZE)
+ data_size = I2O_COMMAND_SIZE;
+
+ size = sizeof(struct i2o_header) + data_size;
+
+ i2o_size = size / sizeof(u32);
+
+ if (size % sizeof(u32))
+ i2o_size++;
+
+ return i2o_size;
+}
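+
+/*
+ * Worked example (for illustration): the result is the message length
+ * in u32 units, rounded up.  A 26 byte dot command gives
+ * sizeof(struct i2o_header) + 26 = 38 bytes, i.e. 9 full words plus
+ * one for the 2 byte remainder, so 10.
+ */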
+
+static inline u32 incoming_data_size(struct i2o_message *i2o_message)
+{
+ return (sizeof(u32) * i2o_message->header.message_size);
+}
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
new file mode 100644
index 00000000..9b083448
--- /dev/null
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -0,0 +1,220 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/device.h>
+#include <linux/input.h>
+
+/* Driver identification */
+#define DRIVER_NAME "ibmasm"
+#define DRIVER_VERSION "1.0"
+#define DRIVER_AUTHOR "Max Asbock <masbock@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
+#define DRIVER_DESC "IBM ASM Service Processor Driver"
+
+#define err(msg) printk(KERN_ERR "%s: " msg "\n", DRIVER_NAME)
+#define info(msg) printk(KERN_INFO "%s: " msg "\n", DRIVER_NAME)
+
+extern int ibmasm_debug;
+#define dbg(STR, ARGS...) \
+ do { \
+ if (ibmasm_debug) \
+ printk(KERN_DEBUG STR , ##ARGS); \
+ } while (0)
+
+static inline char *get_timestamp(char *buf)
+{
+ struct timeval now;
+ do_gettimeofday(&now);
+ sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec);
+ return buf;
+}
+
+#define IBMASM_CMD_PENDING 0
+#define IBMASM_CMD_COMPLETE 1
+#define IBMASM_CMD_FAILED 2
+
+#define IBMASM_CMD_TIMEOUT_NORMAL 45
+#define IBMASM_CMD_TIMEOUT_EXTRA 240
+
+#define IBMASM_CMD_MAX_BUFFER_SIZE 0x8000
+
+#define REVERSE_HEARTBEAT_TIMEOUT 120
+
+#define HEARTBEAT_BUFFER_SIZE 0x400
+
+#ifdef IA64
+#define IBMASM_DRIVER_VPD "Lin64 6.08 "
+#else
+#define IBMASM_DRIVER_VPD "Lin32 6.08 "
+#endif
+
+#define SYSTEM_STATE_OS_UP 5
+#define SYSTEM_STATE_OS_DOWN 4
+
+#define IBMASM_NAME_SIZE 16
+
+#define IBMASM_NUM_EVENTS 10
+#define IBMASM_EVENT_MAX_SIZE 2048u
+
+
+struct command {
+ struct list_head queue_node;
+ wait_queue_head_t wait;
+ unsigned char *buffer;
+ size_t buffer_size;
+ int status;
+ struct kref kref;
+ spinlock_t *lock;
+};
+#define to_command(c) container_of(c, struct command, kref)
+
+void ibmasm_free_command(struct kref *kref);
+static inline void command_put(struct command *cmd)
+{
+ unsigned long flags;
+ spinlock_t *lock = cmd->lock;
+
+ spin_lock_irqsave(lock, flags);
+ kref_put(&cmd->kref, ibmasm_free_command);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static inline void command_get(struct command *cmd)
+{
+ kref_get(&cmd->kref);
+}
+
+
+struct ibmasm_event {
+ unsigned int serial_number;
+ unsigned int data_size;
+ unsigned char data[IBMASM_EVENT_MAX_SIZE];
+};
+
+struct event_buffer {
+ struct ibmasm_event events[IBMASM_NUM_EVENTS];
+ unsigned int next_serial_number;
+ unsigned int next_index;
+ struct list_head readers;
+};
+
+struct event_reader {
+ int cancelled;
+ unsigned int next_serial_number;
+ wait_queue_head_t wait;
+ struct list_head node;
+ unsigned int data_size;
+ unsigned char data[IBMASM_EVENT_MAX_SIZE];
+};
+
+struct reverse_heartbeat {
+ wait_queue_head_t wait;
+ unsigned int stopped;
+};
+
+struct ibmasm_remote {
+ struct input_dev *keybd_dev;
+ struct input_dev *mouse_dev;
+};
+
+struct service_processor {
+ struct list_head node;
+ spinlock_t lock;
+ void __iomem *base_address;
+ unsigned int irq;
+ struct command *current_command;
+ struct command *heartbeat;
+ struct list_head command_queue;
+ struct event_buffer *event_buffer;
+ char dirname[IBMASM_NAME_SIZE];
+ char devname[IBMASM_NAME_SIZE];
+ unsigned int number;
+ struct ibmasm_remote remote;
+ int serial_line;
+ struct device *dev;
+};
+
+/* command processing */
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
+void ibmasm_wait_for_response(struct command *cmd, int timeout);
+void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
+
+/* event processing */
+int ibmasm_event_buffer_init(struct service_processor *sp);
+void ibmasm_event_buffer_exit(struct service_processor *sp);
+void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
+void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
+int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_cancel_next_event(struct event_reader *reader);
+
+/* heartbeat - from SP to OS */
+void ibmasm_register_panic_notifier(void);
+void ibmasm_unregister_panic_notifier(void);
+int ibmasm_heartbeat_init(struct service_processor *sp);
+void ibmasm_heartbeat_exit(struct service_processor *sp);
+void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
+
+/* reverse heartbeat - from OS to SP */
+void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
+
+/* dot commands */
+void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
+int ibmasm_send_driver_vpd(struct service_processor *sp);
+int ibmasm_send_os_state(struct service_processor *sp, int os_state);
+
+/* low level message processing */
+int ibmasm_send_i2o_message(struct service_processor *sp);
+irqreturn_t ibmasm_interrupt_handler(int irq, void *dev_id);
+
+/* remote console */
+void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
+int ibmasm_init_remote_input_dev(struct service_processor *sp);
+void ibmasm_free_remote_input_dev(struct service_processor *sp);
+
+/* file system */
+int ibmasmfs_register(void);
+void ibmasmfs_unregister(void);
+void ibmasmfs_add_sp(struct service_processor *sp);
+
+/* uart */
+#ifdef CONFIG_SERIAL_8250
+void ibmasm_register_uart(struct service_processor *sp);
+void ibmasm_unregister_uart(struct service_processor *sp);
+#else
+#define ibmasm_register_uart(sp)	do { } while (0)
+#define ibmasm_unregister_uart(sp)	do { } while (0)
+#endif
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
new file mode 100644
index 00000000..6673e578
--- /dev/null
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -0,0 +1,630 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+/*
+ * Parts of this code are based on an article by Jonathan Corbet
+ * that appeared in Linux Weekly News.
+ */
+
+
+/*
+ * The IBMASM virtual filesystem. It creates the following hierarchy
+ * dynamically when mounted from user space:
+ *
+ * /ibmasm
+ * |-- 0
+ * | |-- command
+ * | |-- event
+ * | |-- reverse_heartbeat
+ * | `-- remote_video
+ * | |-- depth
+ * | |-- height
+ * | `-- width
+ * .
+ * .
+ * .
+ * `-- n
+ * |-- command
+ * |-- event
+ * |-- reverse_heartbeat
+ * `-- remote_video
+ * |-- depth
+ * |-- height
+ * `-- width
+ *
+ * For each service processor the following files are created:
+ *
+ * command: execute dot commands
+ * write: execute a dot command on the service processor
+ * read: return the result of a previously executed dot command
+ *
+ * event: listen for service processor events
+ *	read: sleep (interruptible) until an event occurs
+ *	write: wake up a sleeping event listener
+ *
+ * reverse_heartbeat: send a heartbeat to the service processor
+ *	read: sleep (interruptible) until the reverse heartbeat fails
+ *	write: wake up a sleeping heartbeat listener
+ *
+ * remote_video/width
+ * remote_video/height
+ * remote_video/depth: control remote display settings
+ * write: set value
+ * read: read value
+ */
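+
+#if 0	/* editor's illustration -- user-space sketch, not part of the driver */
+/*
+ * A minimal sketch of the command file protocol described above, assuming
+ * ibmasmfs is mounted at /mnt/ibmasm and service processor "0" exists
+ * (both names are placeholders). A write() executes the raw dot command;
+ * a read() on the same open file returns the response once it completes.
+ */
+#include <fcntl.h>
+#include <unistd.h>
+
+static ssize_t send_dot_command(const unsigned char *cmd, size_t len,
+				unsigned char *resp, size_t resp_size)
+{
+	ssize_t n;
+	int fd = open("/mnt/ibmasm/0/command", O_RDWR);
+
+	if (fd < 0)
+		return -1;
+	if (write(fd, cmd, len) != (ssize_t)len) {	/* execute the command */
+		close(fd);
+		return -1;
+	}
+	n = read(fd, resp, resp_size);	/* fetch the completed response */
+	close(fd);
+	return n;
+}
+#endif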
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include "ibmasm.h"
+#include "remote.h"
+#include "dot_command.h"
+
+#define IBMASMFS_MAGIC 0x66726f67
+
+static LIST_HEAD(service_processors);
+
+static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode);
+static void ibmasmfs_create_files(struct super_block *sb);
+static int ibmasmfs_fill_super(struct super_block *sb, void *data, int silent);
+
+
+static struct dentry *ibmasmfs_mount(struct file_system_type *fst,
+ int flags, const char *name, void *data)
+{
+ return mount_single(fst, flags, data, ibmasmfs_fill_super);
+}
+
+static const struct super_operations ibmasmfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
+
+static struct file_system_type ibmasmfs_type = {
+ .owner = THIS_MODULE,
+ .name = "ibmasmfs",
+ .mount = ibmasmfs_mount,
+ .kill_sb = kill_litter_super,
+};
+
+static int ibmasmfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = IBMASMFS_MAGIC;
+ sb->s_op = &ibmasmfs_s_ops;
+ sb->s_time_gran = 1;
+
+	root = ibmasmfs_make_inode(sb, S_IFDIR | 0500);
+ if (!root)
+ return -ENOMEM;
+
+ root->i_op = &simple_dir_inode_operations;
+ root->i_fop = ibmasmfs_dir_ops;
+
+ sb->s_root = d_make_root(root);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ibmasmfs_create_files(sb);
+ return 0;
+}
+
+static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ ret->i_ino = get_next_ino();
+ ret->i_mode = mode;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+ }
+ return ret;
+}
+
+static struct dentry *ibmasmfs_create_file(struct super_block *sb,
+ struct dentry *parent,
+ const char *name,
+ const struct file_operations *fops,
+ void *data,
+ int mode)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(parent, name);
+ if (!dentry)
+ return NULL;
+
+ inode = ibmasmfs_make_inode(sb, S_IFREG | mode);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+
+ inode->i_fop = fops;
+ inode->i_private = data;
+
+ d_add(dentry, inode);
+ return dentry;
+}
+
+static struct dentry *ibmasmfs_create_dir(struct super_block *sb,
+ struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(parent, name);
+ if (!dentry)
+ return NULL;
+
+ inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = ibmasmfs_dir_ops;
+
+ d_add(dentry, inode);
+ return dentry;
+}
+
+int ibmasmfs_register(void)
+{
+ return register_filesystem(&ibmasmfs_type);
+}
+
+void ibmasmfs_unregister(void)
+{
+ unregister_filesystem(&ibmasmfs_type);
+}
+
+void ibmasmfs_add_sp(struct service_processor *sp)
+{
+ list_add(&sp->node, &service_processors);
+}
+
+/* struct to save state between command file operations */
+struct ibmasmfs_command_data {
+ struct service_processor *sp;
+ struct command *command;
+};
+
+/* struct to save state between event file operations */
+struct ibmasmfs_event_data {
+ struct service_processor *sp;
+ struct event_reader reader;
+ int active;
+};
+
+/* struct to save state between reverse heartbeat file operations */
+struct ibmasmfs_heartbeat_data {
+ struct service_processor *sp;
+ struct reverse_heartbeat heartbeat;
+ int active;
+};
+
+static int command_file_open(struct inode *inode, struct file *file)
+{
+ struct ibmasmfs_command_data *command_data;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ command_data = kmalloc(sizeof(struct ibmasmfs_command_data), GFP_KERNEL);
+ if (!command_data)
+ return -ENOMEM;
+
+ command_data->command = NULL;
+ command_data->sp = inode->i_private;
+ file->private_data = command_data;
+ return 0;
+}
+
+static int command_file_close(struct inode *inode, struct file *file)
+{
+ struct ibmasmfs_command_data *command_data = file->private_data;
+
+ if (command_data->command)
+ command_put(command_data->command);
+
+ kfree(command_data);
+ return 0;
+}
+
+static ssize_t command_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ struct ibmasmfs_command_data *command_data = file->private_data;
+ struct command *cmd;
+ int len;
+ unsigned long flags;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ spin_lock_irqsave(&command_data->sp->lock, flags);
+ cmd = command_data->command;
+ if (cmd == NULL) {
+ spin_unlock_irqrestore(&command_data->sp->lock, flags);
+ return 0;
+ }
+ command_data->command = NULL;
+ spin_unlock_irqrestore(&command_data->sp->lock, flags);
+
+ if (cmd->status != IBMASM_CMD_COMPLETE) {
+ command_put(cmd);
+ return -EIO;
+ }
+ len = min(count, cmd->buffer_size);
+ if (copy_to_user(buf, cmd->buffer, len)) {
+ command_put(cmd);
+ return -EFAULT;
+ }
+ command_put(cmd);
+
+ return len;
+}
+
+static ssize_t command_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
+{
+ struct ibmasmfs_command_data *command_data = file->private_data;
+ struct command *cmd;
+ unsigned long flags;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ /* commands are executed sequentially, only one command at a time */
+ if (command_data->command)
+ return -EAGAIN;
+
+ cmd = ibmasm_new_command(command_data->sp, count);
+ if (!cmd)
+ return -ENOMEM;
+
+ if (copy_from_user(cmd->buffer, ubuff, count)) {
+ command_put(cmd);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&command_data->sp->lock, flags);
+ if (command_data->command) {
+ spin_unlock_irqrestore(&command_data->sp->lock, flags);
+ command_put(cmd);
+ return -EAGAIN;
+ }
+ command_data->command = cmd;
+ spin_unlock_irqrestore(&command_data->sp->lock, flags);
+
+ ibmasm_exec_command(command_data->sp, cmd);
+ ibmasm_wait_for_response(cmd, get_dot_command_timeout(cmd->buffer));
+
+ return count;
+}
+
+static int event_file_open(struct inode *inode, struct file *file)
+{
+ struct ibmasmfs_event_data *event_data;
+ struct service_processor *sp;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ sp = inode->i_private;
+
+ event_data = kmalloc(sizeof(struct ibmasmfs_event_data), GFP_KERNEL);
+ if (!event_data)
+ return -ENOMEM;
+
+ ibmasm_event_reader_register(sp, &event_data->reader);
+
+ event_data->sp = sp;
+ event_data->active = 0;
+ file->private_data = event_data;
+ return 0;
+}
+
+static int event_file_close(struct inode *inode, struct file *file)
+{
+ struct ibmasmfs_event_data *event_data = file->private_data;
+
+ ibmasm_event_reader_unregister(event_data->sp, &event_data->reader);
+ kfree(event_data);
+ return 0;
+}
+
+static ssize_t event_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ struct ibmasmfs_event_data *event_data = file->private_data;
+ struct event_reader *reader = &event_data->reader;
+ struct service_processor *sp = event_data->sp;
+ int ret;
+ unsigned long flags;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count == 0 || count > IBMASM_EVENT_MAX_SIZE)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ if (event_data->active) {
+ spin_unlock_irqrestore(&sp->lock, flags);
+ return -EBUSY;
+ }
+ event_data->active = 1;
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ ret = ibmasm_get_next_event(sp, reader);
+ if (ret <= 0)
+ goto out;
+
+ if (count < reader->data_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(buf, reader->data, reader->data_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = reader->data_size;
+
+out:
+ event_data->active = 0;
+ return ret;
+}
+
+static ssize_t event_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
+{
+ struct ibmasmfs_event_data *event_data = file->private_data;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count != 1)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ ibmasm_cancel_next_event(&event_data->reader);
+ return 0;
+}
+
+static int r_heartbeat_file_open(struct inode *inode, struct file *file)
+{
+ struct ibmasmfs_heartbeat_data *rhbeat;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ rhbeat = kmalloc(sizeof(struct ibmasmfs_heartbeat_data), GFP_KERNEL);
+ if (!rhbeat)
+ return -ENOMEM;
+
+ rhbeat->sp = inode->i_private;
+ rhbeat->active = 0;
+ ibmasm_init_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat);
+ file->private_data = rhbeat;
+ return 0;
+}
+
+static int r_heartbeat_file_close(struct inode *inode, struct file *file)
+{
+ struct ibmasmfs_heartbeat_data *rhbeat = file->private_data;
+
+ kfree(rhbeat);
+ return 0;
+}
+
+static ssize_t r_heartbeat_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ struct ibmasmfs_heartbeat_data *rhbeat = file->private_data;
+ unsigned long flags;
+ int result;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count == 0 || count > 1024)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ /* allow only one reverse heartbeat per process */
+ spin_lock_irqsave(&rhbeat->sp->lock, flags);
+ if (rhbeat->active) {
+ spin_unlock_irqrestore(&rhbeat->sp->lock, flags);
+ return -EBUSY;
+ }
+ rhbeat->active = 1;
+ spin_unlock_irqrestore(&rhbeat->sp->lock, flags);
+
+ result = ibmasm_start_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat);
+ rhbeat->active = 0;
+
+ return result;
+}
+
+static ssize_t r_heartbeat_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
+{
+ struct ibmasmfs_heartbeat_data *rhbeat = file->private_data;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count != 1)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ if (rhbeat->active)
+ ibmasm_stop_reverse_heartbeat(&rhbeat->heartbeat);
+
+ return 1;
+}
+
+static int remote_settings_file_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ void __iomem *address = (void __iomem *)file->private_data;
+ unsigned char *page;
+ int retval;
+ int len = 0;
+ unsigned int value;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count == 0 || count > 1024)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+ page = (unsigned char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ value = readl(address);
+ len = sprintf(page, "%d\n", value);
+
+ if (copy_to_user(buf, page, len)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+ *offset += len;
+ retval = len;
+
+exit:
+ free_page((unsigned long)page);
+ return retval;
+}
+
+static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
+{
+ void __iomem *address = (void __iomem *)file->private_data;
+ char *buff;
+ unsigned int value;
+
+ if (*offset < 0)
+ return -EINVAL;
+ if (count == 0 || count > 1024)
+ return 0;
+ if (*offset != 0)
+ return 0;
+
+	buff = kzalloc(count + 1, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+ if (copy_from_user(buff, ubuff, count)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+
+ value = simple_strtoul(buff, NULL, 10);
+ writel(value, address);
+ kfree(buff);
+
+ return count;
+}
+
+static const struct file_operations command_fops = {
+ .open = command_file_open,
+ .release = command_file_close,
+ .read = command_file_read,
+ .write = command_file_write,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations event_fops = {
+ .open = event_file_open,
+ .release = event_file_close,
+ .read = event_file_read,
+ .write = event_file_write,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations r_heartbeat_fops = {
+ .open = r_heartbeat_file_open,
+ .release = r_heartbeat_file_close,
+ .read = r_heartbeat_file_read,
+ .write = r_heartbeat_file_write,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations remote_settings_fops = {
+ .open = simple_open,
+ .release = remote_settings_file_close,
+ .read = remote_settings_file_read,
+ .write = remote_settings_file_write,
+ .llseek = generic_file_llseek,
+};
+
+
+static void ibmasmfs_create_files(struct super_block *sb)
+{
+ struct list_head *entry;
+ struct service_processor *sp;
+
+ list_for_each(entry, &service_processors) {
+ struct dentry *dir;
+ struct dentry *remote_dir;
+ sp = list_entry(entry, struct service_processor, node);
+ dir = ibmasmfs_create_dir(sb, sb->s_root, sp->dirname);
+ if (!dir)
+ continue;
+
+ ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
+
+ remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video");
+ if (!remote_dir)
+ continue;
+
+ ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
+ }
+}
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
new file mode 100644
index 00000000..5319ea26
--- /dev/null
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -0,0 +1,85 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include "ibmasm.h"
+#include "lowlevel.h"
+#include "i2o.h"
+#include "dot_command.h"
+#include "remote.h"
+
+static struct i2o_header header = I2O_HEADER_TEMPLATE;
+
+
+int ibmasm_send_i2o_message(struct service_processor *sp)
+{
+ u32 mfa;
+ unsigned int command_size;
+ struct i2o_message *message;
+ struct command *command = sp->current_command;
+
+ mfa = get_mfa_inbound(sp->base_address);
+ if (!mfa)
+ return 1;
+
+ command_size = get_dot_command_size(command->buffer);
+ header.message_size = outgoing_message_size(command_size);
+
+ message = get_i2o_message(sp->base_address, mfa);
+
+ memcpy_toio(&message->header, &header, sizeof(struct i2o_header));
+ memcpy_toio(&message->data, command->buffer, command_size);
+
+ set_mfa_inbound(sp->base_address, mfa);
+
+ return 0;
+}
+
+irqreturn_t ibmasm_interrupt_handler(int irq, void *dev_id)
+{
+ u32 mfa;
+ struct service_processor *sp = (struct service_processor *)dev_id;
+ void __iomem *base_address = sp->base_address;
+ char tsbuf[32];
+
+ if (!sp_interrupt_pending(base_address))
+ return IRQ_NONE;
+
+ dbg("respond to interrupt at %s\n", get_timestamp(tsbuf));
+
+ if (mouse_interrupt_pending(sp)) {
+ ibmasm_handle_mouse_interrupt(sp);
+ clear_mouse_interrupt(sp);
+ }
+
+ mfa = get_mfa_outbound(base_address);
+ if (valid_mfa(mfa)) {
+ struct i2o_message *msg = get_i2o_message(base_address, mfa);
+ ibmasm_receive_message(sp, &msg->data, incoming_data_size(msg));
+ } else
+ dbg("didn't get a valid MFA\n");
+
+ set_mfa_outbound(base_address, mfa);
+ dbg("finished interrupt at %s\n", get_timestamp(tsbuf));
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
new file mode 100644
index 00000000..e97848f5
--- /dev/null
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -0,0 +1,137 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+/* Condor service processor specific hardware definitions */
+
+#ifndef __IBMASM_CONDOR_H__
+#define __IBMASM_CONDOR_H__
+
+#include <asm/io.h>
+
+#define VENDORID_IBM 0x1014
+#define DEVICEID_RSA 0x010F
+
+#define GET_MFA_ADDR(x) (x & 0xFFFFFF00)
+
+#define MAILBOX_FULL(x) (x & 0x00000001)
+
+#define NO_MFAS_AVAILABLE 0xFFFFFFFF
+
+
+#define INBOUND_QUEUE_PORT 0x40 /* contains address of next free MFA */
+#define OUTBOUND_QUEUE_PORT 0x44 /* contains address of posted MFA */
+
+#define SP_INTR_MASK 0x00000008
+#define UART_INTR_MASK 0x00000010
+
+#define INTR_STATUS_REGISTER 0x13A0
+#define INTR_CONTROL_REGISTER 0x13A4
+
+#define SCOUT_COM_A_BASE 0x0000
+#define SCOUT_COM_B_BASE 0x0100
+#define SCOUT_COM_C_BASE 0x0200
+#define SCOUT_COM_D_BASE 0x0300
+
+static inline int sp_interrupt_pending(void __iomem *base_address)
+{
+ return SP_INTR_MASK & readl(base_address + INTR_STATUS_REGISTER);
+}
+
+static inline int uart_interrupt_pending(void __iomem *base_address)
+{
+ return UART_INTR_MASK & readl(base_address + INTR_STATUS_REGISTER);
+}
+
+static inline void ibmasm_enable_interrupts(void __iomem *base_address, int mask)
+{
+ void __iomem *ctrl_reg = base_address + INTR_CONTROL_REGISTER;
+	writel(readl(ctrl_reg) & ~mask, ctrl_reg);
+}
+
+static inline void ibmasm_disable_interrupts(void __iomem *base_address, int mask)
+{
+ void __iomem *ctrl_reg = base_address + INTR_CONTROL_REGISTER;
+	writel(readl(ctrl_reg) | mask, ctrl_reg);
+}
+
+static inline void enable_sp_interrupts(void __iomem *base_address)
+{
+ ibmasm_enable_interrupts(base_address, SP_INTR_MASK);
+}
+
+static inline void disable_sp_interrupts(void __iomem *base_address)
+{
+ ibmasm_disable_interrupts(base_address, SP_INTR_MASK);
+}
+
+static inline void enable_uart_interrupts(void __iomem *base_address)
+{
+ ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
+}
+
+static inline void disable_uart_interrupts(void __iomem *base_address)
+{
+ ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
+}
+
+#define valid_mfa(mfa)	((mfa) != NO_MFAS_AVAILABLE)
+
+static inline u32 get_mfa_outbound(void __iomem *base_address)
+{
+ int retry;
+ u32 mfa;
+
+	for (retry = 0; retry <= 10; retry++) {
+ mfa = readl(base_address + OUTBOUND_QUEUE_PORT);
+ if (valid_mfa(mfa))
+ break;
+ }
+ return mfa;
+}
+
+static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
+{
+ writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
+}
+
+static inline u32 get_mfa_inbound(void __iomem *base_address)
+{
+ u32 mfa = readl(base_address + INBOUND_QUEUE_PORT);
+
+ if (MAILBOX_FULL(mfa))
+ return 0;
+
+ return mfa;
+}
+
+static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
+{
+ writel(mfa, base_address + INBOUND_QUEUE_PORT);
+}
+
+static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
+{
+ return (struct i2o_message *)(GET_MFA_ADDR(mfa) + base_address);
+}
+
+#endif /* __IBMASM_CONDOR_H__ */
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
new file mode 100644
index 00000000..168d8008
--- /dev/null
+++ b/drivers/misc/ibmasm/module.c
@@ -0,0 +1,237 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ * This driver is based on code originally written by Pete Reynolds
+ * and others.
+ *
+ */
+
+/*
+ * The ASM device driver does the following things:
+ *
+ * 1) When loaded it sends a message to the service processor,
+ * indicating that an OS is * running. This causes the service processor
+ * to send periodic heartbeats to the OS.
+ *
+ * 2) Answers the periodic heartbeats sent by the service processor.
+ * Failure to do so would result in system reboot.
+ *
+ * 3) Acts as a pass-through for dot commands sent from user applications.
+ * The interface for this is the ibmasmfs file system.
+ *
+ * 4) Allows user applications to register for event notification. Events
+ * are sent to the driver through interrupts. They can be read from user
+ * space through the ibmasmfs file system (see the sketch below).
+ *
+ * 5) Allows user space applications to send heartbeats to the service
+ * processor (aka reverse heartbeats). Again this happens through ibmasmfs.
+ *
+ * 6) Handles remote mouse and keyboard event interrupts and makes them
+ * available to user applications through ibmasmfs.
+ *
+ */
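+
+#if 0	/* editor's illustration -- user-space sketch, not part of the driver */
+/*
+ * Point 4) above in practice: a sketch of an event listener, assuming
+ * ibmasmfs is mounted at /mnt/ibmasm (a placeholder path). Each read()
+ * sleeps until the service processor posts the next event; the 2048-byte
+ * buffer mirrors IBMASM_EVENT_MAX_SIZE from ibmasm.h.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	unsigned char event[2048];
+	ssize_t n;
+	int fd = open("/mnt/ibmasm/0/event", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+	while ((n = read(fd, event, sizeof(event))) > 0)
+		printf("received a %zd byte event\n", n);
+	close(fd);
+	return 0;
+}
+#endif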
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include "ibmasm.h"
+#include "lowlevel.h"
+#include "remote.h"
+
+int ibmasm_debug = 0;
+module_param(ibmasm_debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ibmasm_debug, "Set debug mode on or off");
+
+
+static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int result;
+ struct service_processor *sp;
+
+ if ((result = pci_enable_device(pdev))) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return result;
+ }
+ if ((result = pci_request_regions(pdev, DRIVER_NAME))) {
+ dev_err(&pdev->dev, "Failed to allocate PCI resources\n");
+ goto error_resources;
+ }
+ /* vnc client won't work without bus-mastering */
+ pci_set_master(pdev);
+
+ sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL);
+ if (sp == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ result = -ENOMEM;
+ goto error_kmalloc;
+ }
+
+ spin_lock_init(&sp->lock);
+ INIT_LIST_HEAD(&sp->command_queue);
+
+ pci_set_drvdata(pdev, (void *)sp);
+ sp->dev = &pdev->dev;
+ sp->number = pdev->bus->number;
+ snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number);
+ snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number);
+
+	if (ibmasm_event_buffer_init(sp)) {
+		dev_err(sp->dev, "Failed to allocate event buffer\n");
+		result = -ENOMEM;	/* don't return the stale 0 from above */
+		goto error_eventbuffer;
+	}
+
+	if (ibmasm_heartbeat_init(sp)) {
+		dev_err(sp->dev, "Failed to allocate heartbeat command\n");
+		result = -ENOMEM;
+		goto error_heartbeat;
+	}
+
+ sp->irq = pdev->irq;
+ sp->base_address = pci_ioremap_bar(pdev, 0);
+ if (!sp->base_address) {
+ dev_err(sp->dev, "Failed to ioremap pci memory\n");
+ result = -ENODEV;
+ goto error_ioremap;
+ }
+
+	result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void *)sp);
+ if (result) {
+ dev_err(sp->dev, "Failed to register interrupt handler\n");
+ goto error_request_irq;
+ }
+
+ enable_sp_interrupts(sp->base_address);
+
+ result = ibmasm_init_remote_input_dev(sp);
+ if (result) {
+ dev_err(sp->dev, "Failed to initialize remote queue\n");
+ goto error_send_message;
+ }
+
+ result = ibmasm_send_driver_vpd(sp);
+ if (result) {
+ dev_err(sp->dev, "Failed to send driver VPD to service processor\n");
+ goto error_send_message;
+ }
+ result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP);
+ if (result) {
+ dev_err(sp->dev, "Failed to send OS state to service processor\n");
+ goto error_send_message;
+ }
+ ibmasmfs_add_sp(sp);
+
+ ibmasm_register_uart(sp);
+
+ return 0;
+
+error_send_message:
+ disable_sp_interrupts(sp->base_address);
+ ibmasm_free_remote_input_dev(sp);
+ free_irq(sp->irq, (void *)sp);
+error_request_irq:
+ iounmap(sp->base_address);
+error_ioremap:
+ ibmasm_heartbeat_exit(sp);
+error_heartbeat:
+ ibmasm_event_buffer_exit(sp);
+error_eventbuffer:
+ pci_set_drvdata(pdev, NULL);
+ kfree(sp);
+error_kmalloc:
+ pci_release_regions(pdev);
+error_resources:
+ pci_disable_device(pdev);
+
+ return result;
+}
+
+static void __devexit ibmasm_remove_one(struct pci_dev *pdev)
+{
+ struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev);
+
+ dbg("Unregistering UART\n");
+ ibmasm_unregister_uart(sp);
+ dbg("Sending OS down message\n");
+ if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
+ err("failed to get repsonse to 'Send OS State' command\n");
+ dbg("Disabling heartbeats\n");
+ ibmasm_heartbeat_exit(sp);
+ dbg("Disabling interrupts\n");
+ disable_sp_interrupts(sp->base_address);
+ dbg("Freeing SP irq\n");
+ free_irq(sp->irq, (void *)sp);
+ dbg("Cleaning up\n");
+ ibmasm_free_remote_input_dev(sp);
+ iounmap(sp->base_address);
+ ibmasm_event_buffer_exit(sp);
+ pci_set_drvdata(pdev, NULL);
+ kfree(sp);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id ibmasm_pci_table[] = {
+ { PCI_DEVICE(VENDORID_IBM, DEVICEID_RSA) },
+ {},
+};
+
+static struct pci_driver ibmasm_driver = {
+ .name = DRIVER_NAME,
+ .id_table = ibmasm_pci_table,
+ .probe = ibmasm_init_one,
+ .remove = __devexit_p(ibmasm_remove_one),
+};
+
+static void __exit ibmasm_exit(void)
+{
+ ibmasm_unregister_panic_notifier();
+ ibmasmfs_unregister();
+ pci_unregister_driver(&ibmasm_driver);
+ info(DRIVER_DESC " version " DRIVER_VERSION " unloaded");
+}
+
+static int __init ibmasm_init(void)
+{
+ int result = pci_register_driver(&ibmasm_driver);
+ if (result)
+ return result;
+
+ result = ibmasmfs_register();
+ if (result) {
+ pci_unregister_driver(&ibmasm_driver);
+ err("Failed to register ibmasmfs file system");
+ return result;
+ }
+
+ ibmasm_register_panic_notifier();
+ info(DRIVER_DESC " version " DRIVER_VERSION " loaded");
+ return 0;
+}
+
+module_init(ibmasm_init);
+module_exit(ibmasm_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ibmasm_pci_table);
+
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
new file mode 100644
index 00000000..232034f5
--- /dev/null
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -0,0 +1,99 @@
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/sched.h>
+#include "ibmasm.h"
+#include "dot_command.h"
+
+/*
+ * Reverse Heartbeat, i.e. heartbeats sent from the driver to the
+ * service processor.
+ * These heartbeats are initiated by user level programs.
+ */
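+
+#if 0	/* editor's illustration -- user-space sketch, not part of the driver */
+/*
+ * How a user-level program might start a reverse heartbeat, assuming
+ * ibmasmfs is mounted at /mnt/ibmasm (a placeholder path). The read()
+ * below only returns once the service processor stops answering (or the
+ * caller is interrupted); writing one byte through the same open file,
+ * e.g. from another thread, stops the loop.
+ */
+#include <fcntl.h>
+#include <unistd.h>
+
+static int watch_service_processor(void)
+{
+	char dummy;
+	ssize_t n;
+	int fd = open("/mnt/ibmasm/0/reverse_heartbeat", O_RDWR);
+
+	if (fd < 0)
+		return -1;
+	n = read(fd, &dummy, 1);	/* blocks while heartbeats succeed */
+	close(fd);
+	return n < 0 ? -1 : 0;		/* 0: heartbeat failed or was stopped */
+}
+#endif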
+
+/* the reverse heartbeat dot command */
+#pragma pack(1)
+static struct {
+ struct dot_command_header header;
+ unsigned char command[3];
+} rhb_dot_cmd = {
+ .header = {
+ .type = sp_read,
+ .command_size = 3,
+ .data_size = 0,
+ .status = 0
+ },
+ .command = { 4, 3, 6 }
+};
+#pragma pack()
+
+void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
+{
+ init_waitqueue_head(&rhb->wait);
+ rhb->stopped = 0;
+}
+
+/**
+ * ibmasm_start_reverse_heartbeat
+ * Loop, sending a reverse heartbeat dot command to the service
+ * processor and then sleeping. The loop ends once the service
+ * processor has failed to respond three times or we are interrupted.
+ */
+int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
+{
+ struct command *cmd;
+ int times_failed = 0;
+ int result = 1;
+
+ cmd = ibmasm_new_command(sp, sizeof rhb_dot_cmd);
+ if (!cmd)
+ return -ENOMEM;
+
+ while (times_failed < 3) {
+ memcpy(cmd->buffer, (void *)&rhb_dot_cmd, sizeof rhb_dot_cmd);
+ cmd->status = IBMASM_CMD_PENDING;
+ ibmasm_exec_command(sp, cmd);
+ ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
+
+ if (cmd->status != IBMASM_CMD_COMPLETE)
+ times_failed++;
+
+ wait_event_interruptible_timeout(rhb->wait,
+ rhb->stopped,
+ REVERSE_HEARTBEAT_TIMEOUT * HZ);
+
+ if (signal_pending(current) || rhb->stopped) {
+ result = -EINTR;
+ break;
+ }
+ }
+ command_put(cmd);
+ rhb->stopped = 0;
+
+ return result;
+}
+
+void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb)
+{
+ rhb->stopped = 1;
+ wake_up_interruptible(&rhb->wait);
+}
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
new file mode 100644
index 00000000..477bb43c
--- /dev/null
+++ b/drivers/misc/ibmasm/remote.c
@@ -0,0 +1,282 @@
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Authors: Max Asböck <amax@us.ibm.com>
+ * Vernon Mauery <vernux@us.ibm.com>
+ *
+ */
+
+/* Remote mouse and keyboard event handling functions */
+
+#include <linux/pci.h>
+#include "ibmasm.h"
+#include "remote.h"
+
+#define MOUSE_X_MAX 1600
+#define MOUSE_Y_MAX 1200
+
+static const unsigned short xlate_high[XLATE_SIZE] = {
+ [KEY_SYM_ENTER & 0xff] = KEY_ENTER,
+ [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
+ [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
+ [KEY_SYM_KPMINUS & 0xff] = KEY_KPMINUS,
+ [KEY_SYM_KPDOT & 0xff] = KEY_KPDOT,
+ [KEY_SYM_KPPLUS & 0xff] = KEY_KPPLUS,
+ [KEY_SYM_KP0 & 0xff] = KEY_KP0,
+ [KEY_SYM_KP1 & 0xff] = KEY_KP1,
+ [KEY_SYM_KP2 & 0xff] = KEY_KP2, [KEY_SYM_KPDOWN & 0xff] = KEY_KP2,
+ [KEY_SYM_KP3 & 0xff] = KEY_KP3,
+ [KEY_SYM_KP4 & 0xff] = KEY_KP4, [KEY_SYM_KPLEFT & 0xff] = KEY_KP4,
+ [KEY_SYM_KP5 & 0xff] = KEY_KP5,
+ [KEY_SYM_KP6 & 0xff] = KEY_KP6, [KEY_SYM_KPRIGHT & 0xff] = KEY_KP6,
+ [KEY_SYM_KP7 & 0xff] = KEY_KP7,
+ [KEY_SYM_KP8 & 0xff] = KEY_KP8, [KEY_SYM_KPUP & 0xff] = KEY_KP8,
+ [KEY_SYM_KP9 & 0xff] = KEY_KP9,
+ [KEY_SYM_BK_SPC & 0xff] = KEY_BACKSPACE,
+ [KEY_SYM_TAB & 0xff] = KEY_TAB,
+ [KEY_SYM_CTRL & 0xff] = KEY_LEFTCTRL,
+ [KEY_SYM_ALT & 0xff] = KEY_LEFTALT,
+ [KEY_SYM_INSERT & 0xff] = KEY_INSERT,
+ [KEY_SYM_DELETE & 0xff] = KEY_DELETE,
+ [KEY_SYM_SHIFT & 0xff] = KEY_LEFTSHIFT,
+ [KEY_SYM_UARROW & 0xff] = KEY_UP,
+ [KEY_SYM_DARROW & 0xff] = KEY_DOWN,
+ [KEY_SYM_LARROW & 0xff] = KEY_LEFT,
+ [KEY_SYM_RARROW & 0xff] = KEY_RIGHT,
+ [KEY_SYM_ESCAPE & 0xff] = KEY_ESC,
+ [KEY_SYM_PAGEUP & 0xff] = KEY_PAGEUP,
+ [KEY_SYM_PAGEDOWN & 0xff] = KEY_PAGEDOWN,
+ [KEY_SYM_HOME & 0xff] = KEY_HOME,
+ [KEY_SYM_END & 0xff] = KEY_END,
+ [KEY_SYM_F1 & 0xff] = KEY_F1,
+ [KEY_SYM_F2 & 0xff] = KEY_F2,
+ [KEY_SYM_F3 & 0xff] = KEY_F3,
+ [KEY_SYM_F4 & 0xff] = KEY_F4,
+ [KEY_SYM_F5 & 0xff] = KEY_F5,
+ [KEY_SYM_F6 & 0xff] = KEY_F6,
+ [KEY_SYM_F7 & 0xff] = KEY_F7,
+ [KEY_SYM_F8 & 0xff] = KEY_F8,
+ [KEY_SYM_F9 & 0xff] = KEY_F9,
+ [KEY_SYM_F10 & 0xff] = KEY_F10,
+ [KEY_SYM_F11 & 0xff] = KEY_F11,
+ [KEY_SYM_F12 & 0xff] = KEY_F12,
+ [KEY_SYM_CAP_LOCK & 0xff] = KEY_CAPSLOCK,
+ [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
+ [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
+};
+
+static const unsigned short xlate[XLATE_SIZE] = {
+ [NO_KEYCODE] = KEY_RESERVED,
+ [KEY_SYM_SPACE] = KEY_SPACE,
+ [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE,
+ [KEY_SYM_ONE] = KEY_1, [KEY_SYM_BANG] = KEY_1,
+ [KEY_SYM_TWO] = KEY_2, [KEY_SYM_AT] = KEY_2,
+ [KEY_SYM_THREE] = KEY_3, [KEY_SYM_POUND] = KEY_3,
+ [KEY_SYM_FOUR] = KEY_4, [KEY_SYM_DOLLAR] = KEY_4,
+ [KEY_SYM_FIVE] = KEY_5, [KEY_SYM_PERCENT] = KEY_5,
+ [KEY_SYM_SIX] = KEY_6, [KEY_SYM_CARAT] = KEY_6,
+ [KEY_SYM_SEVEN] = KEY_7, [KEY_SYM_AMPER] = KEY_7,
+ [KEY_SYM_EIGHT] = KEY_8, [KEY_SYM_STAR] = KEY_8,
+ [KEY_SYM_NINE] = KEY_9, [KEY_SYM_LPAREN] = KEY_9,
+ [KEY_SYM_ZERO] = KEY_0, [KEY_SYM_RPAREN] = KEY_0,
+ [KEY_SYM_MINUS] = KEY_MINUS, [KEY_SYM_USCORE] = KEY_MINUS,
+ [KEY_SYM_EQUAL] = KEY_EQUAL, [KEY_SYM_PLUS] = KEY_EQUAL,
+ [KEY_SYM_LBRKT] = KEY_LEFTBRACE, [KEY_SYM_LCURLY] = KEY_LEFTBRACE,
+ [KEY_SYM_RBRKT] = KEY_RIGHTBRACE, [KEY_SYM_RCURLY] = KEY_RIGHTBRACE,
+ [KEY_SYM_SLASH] = KEY_BACKSLASH, [KEY_SYM_PIPE] = KEY_BACKSLASH,
+ [KEY_SYM_TIC] = KEY_APOSTROPHE, [KEY_SYM_QUOTE] = KEY_APOSTROPHE,
+ [KEY_SYM_SEMIC] = KEY_SEMICOLON, [KEY_SYM_COLON] = KEY_SEMICOLON,
+ [KEY_SYM_COMMA] = KEY_COMMA, [KEY_SYM_LT] = KEY_COMMA,
+ [KEY_SYM_PERIOD] = KEY_DOT, [KEY_SYM_GT] = KEY_DOT,
+ [KEY_SYM_BSLASH] = KEY_SLASH, [KEY_SYM_QMARK] = KEY_SLASH,
+ [KEY_SYM_A] = KEY_A, [KEY_SYM_a] = KEY_A,
+ [KEY_SYM_B] = KEY_B, [KEY_SYM_b] = KEY_B,
+ [KEY_SYM_C] = KEY_C, [KEY_SYM_c] = KEY_C,
+ [KEY_SYM_D] = KEY_D, [KEY_SYM_d] = KEY_D,
+ [KEY_SYM_E] = KEY_E, [KEY_SYM_e] = KEY_E,
+ [KEY_SYM_F] = KEY_F, [KEY_SYM_f] = KEY_F,
+ [KEY_SYM_G] = KEY_G, [KEY_SYM_g] = KEY_G,
+ [KEY_SYM_H] = KEY_H, [KEY_SYM_h] = KEY_H,
+ [KEY_SYM_I] = KEY_I, [KEY_SYM_i] = KEY_I,
+ [KEY_SYM_J] = KEY_J, [KEY_SYM_j] = KEY_J,
+ [KEY_SYM_K] = KEY_K, [KEY_SYM_k] = KEY_K,
+ [KEY_SYM_L] = KEY_L, [KEY_SYM_l] = KEY_L,
+ [KEY_SYM_M] = KEY_M, [KEY_SYM_m] = KEY_M,
+ [KEY_SYM_N] = KEY_N, [KEY_SYM_n] = KEY_N,
+ [KEY_SYM_O] = KEY_O, [KEY_SYM_o] = KEY_O,
+ [KEY_SYM_P] = KEY_P, [KEY_SYM_p] = KEY_P,
+ [KEY_SYM_Q] = KEY_Q, [KEY_SYM_q] = KEY_Q,
+ [KEY_SYM_R] = KEY_R, [KEY_SYM_r] = KEY_R,
+ [KEY_SYM_S] = KEY_S, [KEY_SYM_s] = KEY_S,
+ [KEY_SYM_T] = KEY_T, [KEY_SYM_t] = KEY_T,
+ [KEY_SYM_U] = KEY_U, [KEY_SYM_u] = KEY_U,
+ [KEY_SYM_V] = KEY_V, [KEY_SYM_v] = KEY_V,
+ [KEY_SYM_W] = KEY_W, [KEY_SYM_w] = KEY_W,
+ [KEY_SYM_X] = KEY_X, [KEY_SYM_x] = KEY_X,
+ [KEY_SYM_Y] = KEY_Y, [KEY_SYM_y] = KEY_Y,
+ [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z,
+};
+
+static void print_input(struct remote_input *input)
+{
+ if (input->type == INPUT_TYPE_MOUSE) {
+ unsigned char buttons = input->mouse_buttons;
+ dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
+ input->data.mouse.x, input->data.mouse.y,
+ (buttons) ? " -- buttons:" : "",
+ (buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
+ (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
+ (buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
+ );
+ } else {
+ dbg("remote keypress (code, flag, down):"
+ "%d (0x%x) [0x%x] [0x%x]\n",
+ input->data.keyboard.key_code,
+ input->data.keyboard.key_code,
+ input->data.keyboard.key_flag,
+ input->data.keyboard.key_down
+ );
+ }
+}
+
+static void send_mouse_event(struct input_dev *dev, struct remote_input *input)
+{
+ unsigned char buttons = input->mouse_buttons;
+
+ input_report_abs(dev, ABS_X, input->data.mouse.x);
+ input_report_abs(dev, ABS_Y, input->data.mouse.y);
+ input_report_key(dev, BTN_LEFT, buttons & REMOTE_BUTTON_LEFT);
+ input_report_key(dev, BTN_MIDDLE, buttons & REMOTE_BUTTON_MIDDLE);
+ input_report_key(dev, BTN_RIGHT, buttons & REMOTE_BUTTON_RIGHT);
+ input_sync(dev);
+}
+
+static void send_keyboard_event(struct input_dev *dev,
+ struct remote_input *input)
+{
+ unsigned int key;
+ unsigned short code = input->data.keyboard.key_code;
+
+ if (code & 0xff00)
+ key = xlate_high[code & 0xff];
+ else
+ key = xlate[code];
+ input_report_key(dev, key, input->data.keyboard.key_down);
+ input_sync(dev);
+}
+
+void ibmasm_handle_mouse_interrupt(struct service_processor *sp)
+{
+ unsigned long reader;
+ unsigned long writer;
+ struct remote_input input;
+
+ reader = get_queue_reader(sp);
+ writer = get_queue_writer(sp);
+
+ while (reader != writer) {
+ memcpy_fromio(&input, get_queue_entry(sp, reader),
+ sizeof(struct remote_input));
+
+ print_input(&input);
+ if (input.type == INPUT_TYPE_MOUSE) {
+ send_mouse_event(sp->remote.mouse_dev, &input);
+ } else if (input.type == INPUT_TYPE_KEYBOARD) {
+ send_keyboard_event(sp->remote.keybd_dev, &input);
+ } else
+ break;
+
+ reader = advance_queue_reader(sp, reader);
+ writer = get_queue_writer(sp);
+ }
+}
+
+int ibmasm_init_remote_input_dev(struct service_processor *sp)
+{
+ /* set up the mouse input device */
+ struct input_dev *mouse_dev, *keybd_dev;
+ struct pci_dev *pdev = to_pci_dev(sp->dev);
+ int error = -ENOMEM;
+ int i;
+
+ sp->remote.mouse_dev = mouse_dev = input_allocate_device();
+ sp->remote.keybd_dev = keybd_dev = input_allocate_device();
+
+ if (!mouse_dev || !keybd_dev)
+ goto err_free_devices;
+
+ mouse_dev->id.bustype = BUS_PCI;
+ mouse_dev->id.vendor = pdev->vendor;
+ mouse_dev->id.product = pdev->device;
+ mouse_dev->id.version = 1;
+ mouse_dev->dev.parent = sp->dev;
+ mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) |
+ BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
+ set_bit(BTN_TOUCH, mouse_dev->keybit);
+ mouse_dev->name = "ibmasm RSA I remote mouse";
+ input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
+ input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
+
+ keybd_dev->id.bustype = BUS_PCI;
+ keybd_dev->id.vendor = pdev->vendor;
+ keybd_dev->id.product = pdev->device;
+ keybd_dev->id.version = 2;
+ keybd_dev->dev.parent = sp->dev;
+ keybd_dev->evbit[0] = BIT_MASK(EV_KEY);
+ keybd_dev->name = "ibmasm RSA I remote keyboard";
+
+ for (i = 0; i < XLATE_SIZE; i++) {
+ if (xlate_high[i])
+ set_bit(xlate_high[i], keybd_dev->keybit);
+ if (xlate[i])
+ set_bit(xlate[i], keybd_dev->keybit);
+ }
+
+ error = input_register_device(mouse_dev);
+ if (error)
+ goto err_free_devices;
+
+ error = input_register_device(keybd_dev);
+ if (error)
+ goto err_unregister_mouse_dev;
+
+ enable_mouse_interrupts(sp);
+
+ printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number);
+
+ return 0;
+
+ err_unregister_mouse_dev:
+ input_unregister_device(mouse_dev);
+ mouse_dev = NULL; /* so we don't try to free it again below */
+ err_free_devices:
+ input_free_device(mouse_dev);
+ input_free_device(keybd_dev);
+
+ return error;
+}
+
+void ibmasm_free_remote_input_dev(struct service_processor *sp)
+{
+ disable_mouse_interrupts(sp);
+ input_unregister_device(sp->remote.mouse_dev);
+ input_unregister_device(sp->remote.keybd_dev);
+}
+
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
new file mode 100644
index 00000000..a7729ef7
--- /dev/null
+++ b/drivers/misc/ibmasm/remote.h
@@ -0,0 +1,270 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ * Originally written by Pete Reynolds
+ */
+
+#ifndef _IBMASM_REMOTE_H_
+#define _IBMASM_REMOTE_H_
+
+#include <asm/io.h>
+
+/* pci offsets */
+#define CONDOR_MOUSE_DATA 0x000AC000
+#define CONDOR_MOUSE_ISR_CONTROL 0x00
+#define CONDOR_MOUSE_ISR_STATUS 0x04
+#define CONDOR_MOUSE_Q_READER 0x08
+#define CONDOR_MOUSE_Q_WRITER 0x0C
+#define CONDOR_MOUSE_Q_BEGIN 0x10
+#define CONDOR_MOUSE_MAX_X 0x14
+#define CONDOR_MOUSE_MAX_Y 0x18
+
+#define CONDOR_INPUT_DESKTOP_INFO 0x1F0
+#define CONDOR_INPUT_DISPLAY_RESX 0x1F4
+#define CONDOR_INPUT_DISPLAY_RESY 0x1F8
+#define CONDOR_INPUT_DISPLAY_BITS 0x1FC
+#define CONDOR_OUTPUT_VNC_STATUS 0x200
+
+#define CONDOR_MOUSE_INTR_STATUS_MASK 0x00000001
+
+#define INPUT_TYPE_MOUSE 0x1
+#define INPUT_TYPE_KEYBOARD 0x2
+
+
+/* mouse button states received from SP */
+#define REMOTE_DOUBLE_CLICK 0xF0
+#define REMOTE_BUTTON_LEFT 0x01
+#define REMOTE_BUTTON_MIDDLE 0x02
+#define REMOTE_BUTTON_RIGHT 0x04
+
+/* size of keysym/keycode translation matrices */
+#define XLATE_SIZE 256
+
+struct mouse_input {
+ unsigned short y;
+ unsigned short x;
+};
+
+
+struct keyboard_input {
+ unsigned short key_code;
+ unsigned char key_flag;
+ unsigned char key_down;
+};
+
+
+
+struct remote_input {
+ union {
+ struct mouse_input mouse;
+ struct keyboard_input keyboard;
+ } data;
+
+ unsigned char type;
+ unsigned char pad1;
+ unsigned char mouse_buttons;
+ unsigned char pad3;
+};
+
+#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
+#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
+#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
+#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
+#define desktop_info(sp) (mouse_addr(sp) + CONDOR_INPUT_DESKTOP_INFO)
+#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
+#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+
+#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+
+/* remote input queue operations */
+#define REMOTE_QUEUE_SIZE 60
+
+#define get_queue_writer(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_Q_WRITER)
+#define get_queue_reader(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_Q_READER)
+#define set_queue_reader(sp, reader) writel(reader, mouse_addr(sp) + CONDOR_MOUSE_Q_READER)
+
+#define queue_begin (mouse_addr(sp) + CONDOR_MOUSE_Q_BEGIN)
+
+#define get_queue_entry(sp, read_index) \
+	((void *)(queue_begin + read_index * sizeof(struct remote_input)))
+
+static inline int advance_queue_reader(struct service_processor *sp, unsigned long reader)
+{
+ reader++;
+ if (reader == REMOTE_QUEUE_SIZE)
+ reader = 0;
+
+ set_queue_reader(sp, reader);
+ return reader;
+}
+
+#define NO_KEYCODE 0
+#define KEY_SYM_BK_SPC 0xFF08
+#define KEY_SYM_TAB 0xFF09
+#define KEY_SYM_ENTER 0xFF0D
+#define KEY_SYM_SCR_LOCK 0xFF14
+#define KEY_SYM_ESCAPE 0xFF1B
+#define KEY_SYM_HOME 0xFF50
+#define KEY_SYM_LARROW 0xFF51
+#define KEY_SYM_UARROW 0xFF52
+#define KEY_SYM_RARROW 0xFF53
+#define KEY_SYM_DARROW 0xFF54
+#define KEY_SYM_PAGEUP 0xFF55
+#define KEY_SYM_PAGEDOWN 0xFF56
+#define KEY_SYM_END 0xFF57
+#define KEY_SYM_INSERT 0xFF63
+#define KEY_SYM_NUM_LOCK 0xFF7F
+#define KEY_SYM_KPSTAR 0xFFAA
+#define KEY_SYM_KPPLUS 0xFFAB
+#define KEY_SYM_KPMINUS 0xFFAD
+#define KEY_SYM_KPDOT 0xFFAE
+#define KEY_SYM_KPSLASH 0xFFAF
+#define KEY_SYM_KPRIGHT 0xFF96
+#define KEY_SYM_KPUP 0xFF97
+#define KEY_SYM_KPLEFT 0xFF98
+#define KEY_SYM_KPDOWN 0xFF99
+#define KEY_SYM_KP0 0xFFB0
+#define KEY_SYM_KP1 0xFFB1
+#define KEY_SYM_KP2 0xFFB2
+#define KEY_SYM_KP3 0xFFB3
+#define KEY_SYM_KP4 0xFFB4
+#define KEY_SYM_KP5 0xFFB5
+#define KEY_SYM_KP6 0xFFB6
+#define KEY_SYM_KP7 0xFFB7
+#define KEY_SYM_KP8 0xFFB8
+#define KEY_SYM_KP9 0xFFB9
+#define KEY_SYM_F1 0xFFBE // 1B 5B 5B 41
+#define KEY_SYM_F2 0xFFBF // 1B 5B 5B 42
+#define KEY_SYM_F3 0xFFC0 // 1B 5B 5B 43
+#define KEY_SYM_F4 0xFFC1 // 1B 5B 5B 44
+#define KEY_SYM_F5 0xFFC2 // 1B 5B 5B 45
+#define KEY_SYM_F6 0xFFC3 // 1B 5B 31 37 7E
+#define KEY_SYM_F7 0xFFC4 // 1B 5B 31 38 7E
+#define KEY_SYM_F8 0xFFC5 // 1B 5B 31 39 7E
+#define KEY_SYM_F9 0xFFC6 // 1B 5B 32 30 7E
+#define KEY_SYM_F10 0xFFC7 // 1B 5B 32 31 7E
+#define KEY_SYM_F11 0xFFC8 // 1B 5B 32 33 7E
+#define KEY_SYM_F12 0xFFC9 // 1B 5B 32 34 7E
+#define KEY_SYM_SHIFT 0xFFE1
+#define KEY_SYM_CTRL 0xFFE3
+#define KEY_SYM_ALT 0xFFE9
+#define KEY_SYM_CAP_LOCK 0xFFE5
+#define KEY_SYM_DELETE 0xFFFF
+#define KEY_SYM_TILDE 0x60
+#define KEY_SYM_BKTIC 0x7E
+#define KEY_SYM_ONE 0x31
+#define KEY_SYM_BANG 0x21
+#define KEY_SYM_TWO 0x32
+#define KEY_SYM_AT 0x40
+#define KEY_SYM_THREE 0x33
+#define KEY_SYM_POUND 0x23
+#define KEY_SYM_FOUR 0x34
+#define KEY_SYM_DOLLAR 0x24
+#define KEY_SYM_FIVE 0x35
+#define KEY_SYM_PERCENT 0x25
+#define KEY_SYM_SIX 0x36
+#define KEY_SYM_CARAT 0x5E
+#define KEY_SYM_SEVEN 0x37
+#define KEY_SYM_AMPER 0x26
+#define KEY_SYM_EIGHT 0x38
+#define KEY_SYM_STAR 0x2A
+#define KEY_SYM_NINE 0x39
+#define KEY_SYM_LPAREN 0x28
+#define KEY_SYM_ZERO 0x30
+#define KEY_SYM_RPAREN 0x29
+#define KEY_SYM_MINUS 0x2D
+#define KEY_SYM_USCORE 0x5F
+#define KEY_SYM_EQUAL 0x2B
+#define KEY_SYM_PLUS 0x3D
+#define KEY_SYM_LBRKT 0x5B
+#define KEY_SYM_LCURLY 0x7B
+#define KEY_SYM_RBRKT 0x5D
+#define KEY_SYM_RCURLY 0x7D
+#define KEY_SYM_SLASH 0x5C
+#define KEY_SYM_PIPE 0x7C
+#define KEY_SYM_TIC 0x27
+#define KEY_SYM_QUOTE 0x22
+#define KEY_SYM_SEMIC 0x3B
+#define KEY_SYM_COLON 0x3A
+#define KEY_SYM_COMMA 0x2C
+#define KEY_SYM_LT 0x3C
+#define KEY_SYM_PERIOD 0x2E
+#define KEY_SYM_GT 0x3E
+#define KEY_SYM_BSLASH 0x2F
+#define KEY_SYM_QMARK 0x3F
+#define KEY_SYM_A 0x41
+#define KEY_SYM_B 0x42
+#define KEY_SYM_C 0x43
+#define KEY_SYM_D 0x44
+#define KEY_SYM_E 0x45
+#define KEY_SYM_F 0x46
+#define KEY_SYM_G 0x47
+#define KEY_SYM_H 0x48
+#define KEY_SYM_I 0x49
+#define KEY_SYM_J 0x4A
+#define KEY_SYM_K 0x4B
+#define KEY_SYM_L 0x4C
+#define KEY_SYM_M 0x4D
+#define KEY_SYM_N 0x4E
+#define KEY_SYM_O 0x4F
+#define KEY_SYM_P 0x50
+#define KEY_SYM_Q 0x51
+#define KEY_SYM_R 0x52
+#define KEY_SYM_S 0x53
+#define KEY_SYM_T 0x54
+#define KEY_SYM_U 0x55
+#define KEY_SYM_V 0x56
+#define KEY_SYM_W 0x57
+#define KEY_SYM_X 0x58
+#define KEY_SYM_Y 0x59
+#define KEY_SYM_Z 0x5A
+#define KEY_SYM_a 0x61
+#define KEY_SYM_b 0x62
+#define KEY_SYM_c 0x63
+#define KEY_SYM_d 0x64
+#define KEY_SYM_e 0x65
+#define KEY_SYM_f 0x66
+#define KEY_SYM_g 0x67
+#define KEY_SYM_h 0x68
+#define KEY_SYM_i 0x69
+#define KEY_SYM_j 0x6A
+#define KEY_SYM_k 0x6B
+#define KEY_SYM_l 0x6C
+#define KEY_SYM_m 0x6D
+#define KEY_SYM_n 0x6E
+#define KEY_SYM_o 0x6F
+#define KEY_SYM_p 0x70
+#define KEY_SYM_q 0x71
+#define KEY_SYM_r 0x72
+#define KEY_SYM_s 0x73
+#define KEY_SYM_t 0x74
+#define KEY_SYM_u 0x75
+#define KEY_SYM_v 0x76
+#define KEY_SYM_w 0x77
+#define KEY_SYM_x 0x78
+#define KEY_SYM_y 0x79
+#define KEY_SYM_z 0x7A
+#define KEY_SYM_SPACE 0x20
+#endif /* _IBMASM_REMOTE_H_ */
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
new file mode 100644
index 00000000..1dcb9ae1
--- /dev/null
+++ b/drivers/misc/ibmasm/uart.c
@@ -0,0 +1,72 @@
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ */
+
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include "ibmasm.h"
+#include "lowlevel.h"
+
+
+void ibmasm_register_uart(struct service_processor *sp)
+{
+ struct uart_port uport;
+ void __iomem *iomem_base;
+
+ iomem_base = sp->base_address + SCOUT_COM_B_BASE;
+
+ /* read the uart scratch register to determine if the UART
+ * is dedicated to the service processor or if the OS can use it
+ */
+	if (readl(iomem_base + UART_SCR) == 0) {
+ dev_info(sp->dev, "IBM SP UART not registered, owned by service processor\n");
+ sp->serial_line = -1;
+ return;
+ }
+
+ memset(&uport, 0, sizeof(struct uart_port));
+ uport.irq = sp->irq;
+ uport.uartclk = 3686400;
+ uport.flags = UPF_SHARE_IRQ;
+ uport.iotype = UPIO_MEM;
+ uport.membase = iomem_base;
+
+ sp->serial_line = serial8250_register_port(&uport);
+ if (sp->serial_line < 0) {
+ dev_err(sp->dev, "Failed to register serial port\n");
+ return;
+ }
+ enable_uart_interrupts(sp->base_address);
+}
+
+void ibmasm_unregister_uart(struct service_processor *sp)
+{
+ if (sp->serial_line < 0)
+ return;
+
+ disable_uart_interrupts(sp->base_address);
+ serial8250_unregister_port(sp->serial_line);
+}
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
new file mode 100644
index 00000000..00295367
--- /dev/null
+++ b/drivers/misc/ics932s401.c
@@ -0,0 +1,495 @@
+/*
+ * A driver for the Integrated Circuits ICS932S401
+ * Copyright (C) 2008 IBM
+ *
+ * Author: Darrick J. Wong <djwong@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
+
+/* ICS932S401 registers */
+#define ICS932S401_REG_CFG2 0x01
+#define ICS932S401_CFG1_SPREAD 0x01
+#define ICS932S401_REG_CFG7 0x06
+#define ICS932S401_FS_MASK 0x07
+#define ICS932S401_REG_VENDOR_REV 0x07
+#define ICS932S401_VENDOR 1
+#define ICS932S401_VENDOR_MASK 0x0F
+#define ICS932S401_REV 4
+#define ICS932S401_REV_SHIFT 4
+#define ICS932S401_REG_DEVICE 0x09
+#define ICS932S401_DEVICE 11
+#define ICS932S401_REG_CTRL 0x0A
+#define ICS932S401_MN_ENABLED 0x80
+#define ICS932S401_CPU_ALT 0x04
+#define ICS932S401_SRC_ALT 0x08
+#define ICS932S401_REG_CPU_M_CTRL 0x0B
+#define ICS932S401_M_MASK 0x3F
+#define ICS932S401_REG_CPU_N_CTRL 0x0C
+#define ICS932S401_REG_CPU_SPREAD1 0x0D
+#define ICS932S401_REG_CPU_SPREAD2 0x0E
+#define ICS932S401_SPREAD_MASK 0x7FFF
+#define ICS932S401_REG_SRC_M_CTRL 0x0F
+#define ICS932S401_REG_SRC_N_CTRL 0x10
+#define ICS932S401_REG_SRC_SPREAD1 0x11
+#define ICS932S401_REG_SRC_SPREAD2 0x12
+#define ICS932S401_REG_CPU_DIVISOR 0x13
+#define ICS932S401_CPU_DIVISOR_SHIFT 4
+#define ICS932S401_REG_PCISRC_DIVISOR 0x14
+#define ICS932S401_SRC_DIVISOR_MASK 0x0F
+#define ICS932S401_PCI_DIVISOR_SHIFT 4
+
+/* Base clock is 14.318MHz */
+#define BASE_CLOCK 14318
+
+#define NUM_REGS 21
+#define NUM_MIRRORED_REGS 15
+
+static int regs_to_copy[NUM_MIRRORED_REGS] = {
+ ICS932S401_REG_CFG2,
+ ICS932S401_REG_CFG7,
+ ICS932S401_REG_VENDOR_REV,
+ ICS932S401_REG_DEVICE,
+ ICS932S401_REG_CTRL,
+ ICS932S401_REG_CPU_M_CTRL,
+ ICS932S401_REG_CPU_N_CTRL,
+ ICS932S401_REG_CPU_SPREAD1,
+ ICS932S401_REG_CPU_SPREAD2,
+ ICS932S401_REG_SRC_M_CTRL,
+ ICS932S401_REG_SRC_N_CTRL,
+ ICS932S401_REG_SRC_SPREAD1,
+ ICS932S401_REG_SRC_SPREAD2,
+ ICS932S401_REG_CPU_DIVISOR,
+ ICS932S401_REG_PCISRC_DIVISOR,
+};
+
+/* How often do we reread sensor values? (In jiffies) */
+#define SENSOR_REFRESH_INTERVAL (2 * HZ)
+
+/* How often do we reread sensor limit values? (In jiffies) */
+#define LIMIT_REFRESH_INTERVAL (60 * HZ)
+
+struct ics932s401_data {
+ struct attribute_group attrs;
+ struct mutex lock;
+ char sensors_valid;
+ unsigned long sensors_last_updated; /* In jiffies */
+
+ u8 regs[NUM_REGS];
+};
+
+static int ics932s401_probe(struct i2c_client *client,
+ const struct i2c_device_id *id);
+static int ics932s401_detect(struct i2c_client *client,
+ struct i2c_board_info *info);
+static int ics932s401_remove(struct i2c_client *client);
+
+static const struct i2c_device_id ics932s401_id[] = {
+ { "ics932s401", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ics932s401_id);
+
+static struct i2c_driver ics932s401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "ics932s401",
+ },
+ .probe = ics932s401_probe,
+ .remove = ics932s401_remove,
+ .id_table = ics932s401_id,
+ .detect = ics932s401_detect,
+ .address_list = normal_i2c,
+};
+
+static struct ics932s401_data *ics932s401_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ics932s401_data *data = i2c_get_clientdata(client);
+ unsigned long local_jiffies = jiffies;
+ int i, temp;
+
+ mutex_lock(&data->lock);
+ if (time_before(local_jiffies, data->sensors_last_updated +
+ SENSOR_REFRESH_INTERVAL)
+ && data->sensors_valid)
+ goto out;
+
+ /*
+ * Each register must be read as a word and then right shifted 8 bits.
+ * Not really sure why this is; setting the "byte count programming"
+ * register to 1 does not fix this problem.
+ */
+ for (i = 0; i < NUM_MIRRORED_REGS; i++) {
+ temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
+ if (temp < 0)
+ temp = 0; /* treat a failed read as zero */
+ data->regs[regs_to_copy[i]] = temp >> 8;
+ }
+
+ data->sensors_last_updated = local_jiffies;
+ data->sensors_valid = 1;
+
+out:
+ mutex_unlock(&data->lock);
+ return data;
+}
+
+static ssize_t show_spread_enabled(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+
+ if (data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD)
+ return sprintf(buf, "1\n");
+
+ return sprintf(buf, "0\n");
+}
+
+/* bit to cpu khz map */
+static const int fs_speeds[] = {
+ 266666,
+ 133333,
+ 200000,
+ 166666,
+ 333333,
+ 100000,
+ 400000,
+ 0,
+};
+
+/* clock divisor map */
+static const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30, 8, 12, 20, 60, 16,
+ 24, 40, 120};
+
+/* Calculate CPU frequency from the M/N registers. */
+static int calculate_cpu_freq(struct ics932s401_data *data)
+{
+ int m, n, freq;
+
+ m = data->regs[ICS932S401_REG_CPU_M_CTRL] & ICS932S401_M_MASK;
+ n = data->regs[ICS932S401_REG_CPU_N_CTRL];
+
+ /* Pull in bits 8 & 9 from the M register */
+ n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1;
+ n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3;
+
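+ /* Example with hypothetical register values M = 28, N = 620:
+ * 14318 kHz * (620 + 8) / (28 + 2) = ~299723 kHz, before the
+ * output divisor below is applied.
+ */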
+ freq = BASE_CLOCK * (n + 8) / (m + 2);
+ freq /= divisors[data->regs[ICS932S401_REG_CPU_DIVISOR] >>
+ ICS932S401_CPU_DIVISOR_SHIFT];
+
+ return freq;
+}
+
+static ssize_t show_cpu_clock(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+
+ return sprintf(buf, "%d\n", calculate_cpu_freq(data));
+}
+
+static ssize_t show_cpu_clock_sel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+ int freq;
+
+ if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
+ freq = calculate_cpu_freq(data);
+ else {
+ /* Freq is neatly wrapped up for us */
+ int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK;
+ freq = fs_speeds[fid];
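+ /* the ALT strap swaps in the 160/320 MHz variants of the
+ * 166.66/333.33 MHz settings below */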
+ if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) {
+ switch (freq) {
+ case 166666:
+ freq = 160000;
+ break;
+ case 333333:
+ freq = 320000;
+ break;
+ }
+ }
+ }
+
+ return sprintf(buf, "%d\n", freq);
+}
+
+/* Calculate SRC frequency from the M/N registers. */
+static int calculate_src_freq(struct ics932s401_data *data)
+{
+ int m, n, freq;
+
+ m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK;
+ n = data->regs[ICS932S401_REG_SRC_N_CTRL];
+
+ /* Pull in bits 8 & 9 from the M register */
+ n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1;
+ n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3;
+
+ freq = BASE_CLOCK * (n + 8) / (m + 2);
+ freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] &
+ ICS932S401_SRC_DIVISOR_MASK];
+
+ return freq;
+}
+
+static ssize_t show_src_clock(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+
+ return sprintf(buf, "%d\n", calculate_src_freq(data));
+}
+
+static ssize_t show_src_clock_sel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+ int freq;
+
+ if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
+ freq = calculate_src_freq(data);
+ else
+ /* Freq is neatly wrapped up for us */
+ if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT &&
+ data->regs[ICS932S401_REG_CTRL] & ICS932S401_SRC_ALT)
+ freq = 96000;
+ else
+ freq = 100000;
+
+ return sprintf(buf, "%d\n", freq);
+}
+
+/* Calculate PCI frequency from the SRC M/N registers. */
+static int calculate_pci_freq(struct ics932s401_data *data)
+{
+ int m, n, freq;
+
+ m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK;
+ n = data->regs[ICS932S401_REG_SRC_N_CTRL];
+
+ /* Pull in bits 8 & 9 from the M register */
+ n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1;
+ n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3;
+
+ freq = BASE_CLOCK * (n + 8) / (m + 2);
+ freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] >>
+ ICS932S401_PCI_DIVISOR_SHIFT];
+
+ return freq;
+}
+
+static ssize_t show_pci_clock(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+
+ return sprintf(buf, "%d\n", calculate_pci_freq(data));
+}
+
+static ssize_t show_pci_clock_sel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+ int freq;
+
+ if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
+ freq = calculate_pci_freq(data);
+ else
+ freq = 33333;
+
+ return sprintf(buf, "%d\n", freq);
+}
+
+static ssize_t show_value(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf);
+
+static ssize_t show_spread(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf);
+
+static DEVICE_ATTR(spread_enabled, S_IRUGO, show_spread_enabled, NULL);
+static DEVICE_ATTR(cpu_clock_selection, S_IRUGO, show_cpu_clock_sel, NULL);
+static DEVICE_ATTR(cpu_clock, S_IRUGO, show_cpu_clock, NULL);
+static DEVICE_ATTR(src_clock_selection, S_IRUGO, show_src_clock_sel, NULL);
+static DEVICE_ATTR(src_clock, S_IRUGO, show_src_clock, NULL);
+static DEVICE_ATTR(pci_clock_selection, S_IRUGO, show_pci_clock_sel, NULL);
+static DEVICE_ATTR(pci_clock, S_IRUGO, show_pci_clock, NULL);
+static DEVICE_ATTR(usb_clock, S_IRUGO, show_value, NULL);
+static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL);
+static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL);
+static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL);
+
+static struct attribute *ics932s401_attr[] =
+{
+ &dev_attr_spread_enabled.attr,
+ &dev_attr_cpu_clock_selection.attr,
+ &dev_attr_cpu_clock.attr,
+ &dev_attr_src_clock_selection.attr,
+ &dev_attr_src_clock.attr,
+ &dev_attr_pci_clock_selection.attr,
+ &dev_attr_pci_clock.attr,
+ &dev_attr_usb_clock.attr,
+ &dev_attr_ref_clock.attr,
+ &dev_attr_cpu_spread.attr,
+ &dev_attr_src_spread.attr,
+ NULL
+};
+
+static ssize_t show_value(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int x;
+
+ if (devattr == &dev_attr_usb_clock)
+ x = 48000;
+ else if (devattr == &dev_attr_ref_clock)
+ x = BASE_CLOCK;
+ else
+ BUG();
+
+ return sprintf(buf, "%d\n", x);
+}
+
+static ssize_t show_spread(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct ics932s401_data *data = ics932s401_update_device(dev);
+ int reg;
+ unsigned long val;
+
+ if (!(data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD))
+ return sprintf(buf, "0%%\n");
+
+ if (devattr == &dev_attr_src_spread)
+ reg = ICS932S401_REG_SRC_SPREAD1;
+ else if (devattr == &dev_attr_cpu_spread)
+ reg = ICS932S401_REG_CPU_SPREAD1;
+ else
+ BUG();
+
+ val = data->regs[reg] | (data->regs[reg + 1] << 8);
+ val &= ICS932S401_SPREAD_MASK;
+
+ /* Scale the raw 0..2^14 value to 0..0.5%; the spread is always
+ * reported as a down-spread. */
+ val = 500000 * val / 16384;
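+ /* e.g. a raw value of 8192 (0x2000) is printed as "-0.250000%" */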
+ return sprintf(buf, "-0.%lu%%\n", val);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int ics932s401_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int vendor, device, revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ vendor = i2c_smbus_read_word_data(client, ICS932S401_REG_VENDOR_REV);
+ vendor >>= 8;
+ revision = vendor >> ICS932S401_REV_SHIFT;
+ vendor &= ICS932S401_VENDOR_MASK;
+ if (vendor != ICS932S401_VENDOR)
+ return -ENODEV;
+
+ device = i2c_smbus_read_word_data(client, ICS932S401_REG_DEVICE);
+ device >>= 8;
+ if (device != ICS932S401_DEVICE)
+ return -ENODEV;
+
+ if (revision != ICS932S401_REV)
+ dev_info(&adapter->dev, "Unknown revision %d\n", revision);
+
+ strlcpy(info->type, "ics932s401", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int ics932s401_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ics932s401_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct ics932s401_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->lock);
+
+ dev_info(&client->dev, "%s chip found\n", client->name);
+
+ /* Register sysfs hooks */
+ data->attrs.attrs = ics932s401_attr;
+ err = sysfs_create_group(&client->dev.kobj, &data->attrs);
+ if (err)
+ goto exit_free;
+
+ return 0;
+
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int ics932s401_remove(struct i2c_client *client)
+{
+ struct ics932s401_data *data = i2c_get_clientdata(client);
+
+ sysfs_remove_group(&client->dev.kobj, &data->attrs);
+ kfree(data);
+ return 0;
+}
+
+module_i2c_driver(ics932s401_driver);
+
+MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_DESCRIPTION("ICS932S401 driver");
+MODULE_LICENSE("GPL");
+
+/* IBM IntelliStation Z30 */
+MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*");
+MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*");
+
+/* IBM x3650/x3550 */
+MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650*");
+MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550*");
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
new file mode 100644
index 00000000..df03dd3b
--- /dev/null
+++ b/drivers/misc/ioc4.c
@@ -0,0 +1,502 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/* This file contains the master driver module for use by SGI IOC4 subdrivers.
+ *
+ * It allocates any resources shared between multiple subdevices, and
+ * provides accessor functions (where needed) and the like for those
+ * resources. It also provides a mechanism for the subdevice modules
+ * to support loading and unloading.
+ *
+ * Non-shared resources (e.g. external interrupt A_INT_OUT register page
+ * alias, serial port and UART registers) are handled by the subdevice
+ * modules themselves.
+ *
+ * This is all necessary because IOC4 is not implemented as a multi-function
+ * PCI device, but an amalgamation of disparate registers for several
+ * types of device (ATA, serial, external interrupts). The normal
+ * resource management in the kernel doesn't have quite the right interfaces
+ * to handle this situation (e.g. multiple modules can't claim the same
+ * PCI ID), thus this IOC4 master module.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioc4.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+#include <asm/io.h>
+
+/***************
+ * Definitions *
+ ***************/
+
+/* Tweakable values */
+
+/* PCI bus speed detection/calibration */
+#define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */
+#define IOC4_CALIBRATE_CYCLES 256 /* Average over this many cycles */
+#define IOC4_CALIBRATE_DISCARD 2 /* Discard first few cycles */
+#define IOC4_CALIBRATE_LOW_MHZ 25 /* Lower bound on bus speed sanity */
+#define IOC4_CALIBRATE_HIGH_MHZ 75 /* Upper bound on bus speed sanity */
+#define IOC4_CALIBRATE_DEFAULT_MHZ 66 /* Assumed if sanity check fails */
+
+/************************
+ * Submodule management *
+ ************************/
+
+static DEFINE_MUTEX(ioc4_mutex);
+
+static LIST_HEAD(ioc4_devices);
+static LIST_HEAD(ioc4_submodules);
+
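+/* A subdriver typically hooks in from its module init with a statically
+ * defined submodule (hypothetical sketch; only the callbacks this file
+ * uses are shown):
+ *
+ * static struct ioc4_submodule foo_submodule = {
+ * .is_owner = THIS_MODULE,
+ * .is_probe = foo_probe,
+ * .is_remove = foo_remove,
+ * };
+ * ioc4_register_submodule(&foo_submodule);
+ */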
+/* Register an IOC4 submodule */
+int
+ioc4_register_submodule(struct ioc4_submodule *is)
+{
+ struct ioc4_driver_data *idd;
+
+ mutex_lock(&ioc4_mutex);
+ list_add(&is->is_list, &ioc4_submodules);
+
+ /* Initialize submodule for each IOC4 */
+ if (!is->is_probe)
+ goto out;
+
+ list_for_each_entry(idd, &ioc4_devices, idd_list) {
+ if (is->is_probe(idd)) {
+ printk(KERN_WARNING
+ "%s: IOC4 submodule %s probe failed "
+ "for pci_dev %s",
+ __func__, module_name(is->is_owner),
+ pci_name(idd->idd_pdev));
+ }
+ }
+ out:
+ mutex_unlock(&ioc4_mutex);
+ return 0;
+}
+
+/* Unregister an IOC4 submodule */
+void
+ioc4_unregister_submodule(struct ioc4_submodule *is)
+{
+ struct ioc4_driver_data *idd;
+
+ mutex_lock(&ioc4_mutex);
+ list_del(&is->is_list);
+
+ /* Remove submodule for each IOC4 */
+ if (!is->is_remove)
+ goto out;
+
+ list_for_each_entry(idd, &ioc4_devices, idd_list) {
+ if (is->is_remove(idd)) {
+ printk(KERN_WARNING
+ "%s: IOC4 submodule %s remove failed "
+ "for pci_dev %s.\n",
+ __func__, module_name(is->is_owner),
+ pci_name(idd->idd_pdev));
+ }
+ }
+ out:
+ mutex_unlock(&ioc4_mutex);
+}
+
+/*********************
+ * Device management *
+ *********************/
+
+#define IOC4_CALIBRATE_LOW_LIMIT \
+ (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
+#define IOC4_CALIBRATE_HIGH_LIMIT \
+ (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
+#define IOC4_CALIBRATE_DEFAULT \
+ (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)
+
+#define IOC4_CALIBRATE_END \
+ (IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)
+
+#define IOC4_INT_OUT_MODE_TOGGLE 0x7 /* Toggle INT_OUT every COUNT+1 ticks */
+
+/* Determines external interrupt output clock period of the PCI bus an
+ * IOC4 is attached to. This value can be used to determine the PCI
+ * bus speed.
+ *
+ * IOC4 has a design feature that various internal timers are derived from
+ * the PCI bus clock. This causes IOC4 device drivers to need to take the
+ * bus speed into account when setting various register values (e.g. INT_OUT
+ * register COUNT field, UART divisors, etc). Since this information is
+ * needed by several subdrivers, it is determined by the main IOC4 driver,
+ * even though the following code utilizes external interrupt registers
+ * to perform the speed calculation.
+ */
+static void __devinit
+ioc4_clock_calibrate(struct ioc4_driver_data *idd)
+{
+ union ioc4_int_out int_out;
+ union ioc4_gpcr gpcr;
+ unsigned int state, last_state = 1;
+ struct timespec start_ts, end_ts;
+ uint64_t start, end, period;
+ unsigned int count = 0;
+
+ /* Enable output */
+ gpcr.raw = 0;
+ gpcr.fields.dir = IOC4_GPCR_DIR_0;
+ gpcr.fields.int_out_en = 1;
+ writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);
+
+ /* Reset to power-on state */
+ writel(0, &idd->idd_misc_regs->int_out.raw);
+ mmiowb();
+
+ /* Set up square wave */
+ int_out.raw = 0;
+ int_out.fields.count = IOC4_CALIBRATE_COUNT;
+ int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
+ int_out.fields.diag = 0;
+ writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
+ mmiowb();
+
+ /* Check square wave period averaged over some number of cycles */
+ do {
+ int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
+ state = int_out.fields.int_out;
+ if (!last_state && state) {
+ count++;
+ if (count == IOC4_CALIBRATE_END) {
+ ktime_get_ts(&end_ts);
+ break;
+ } else if (count == IOC4_CALIBRATE_DISCARD)
+ ktime_get_ts(&start_ts);
+ }
+ last_state = state;
+ } while (1);
+
+ /* Calculation rearranged to preserve intermediate precision.
+ * Logically:
+ * 1. "end - start" gives us the measurement period over all
+ * the square wave cycles.
+ * 2. Divide by number of square wave cycles to get the period
+ * of a square wave cycle.
+ * 3. Divide by 2*(int_out.fields.count+1), which is the formula
+ * by which the IOC4 generates the square wave, to get the
+ * period of an IOC4 INT_OUT count.
+ */
+ end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
+ start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
+ period = (end - start) /
+ (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
+
+ /* Bounds check the result. */
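+ /* (A slower bus yields a longer period, so the 25 MHz floor maps to
+ * the upper period limit and the 75 MHz ceiling to the lower one.) */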
+ if (period > IOC4_CALIBRATE_LOW_LIMIT ||
+ period < IOC4_CALIBRATE_HIGH_LIMIT) {
+ printk(KERN_INFO
+ "IOC4 %s: Clock calibration failed. Assuming"
+ "PCI clock is %d ns.\n",
+ pci_name(idd->idd_pdev),
+ IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
+ period = IOC4_CALIBRATE_DEFAULT;
+ } else {
+ u64 ns = period;
+
+ do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
+ printk(KERN_DEBUG
+ "IOC4 %s: PCI clock is %llu ns.\n",
+ pci_name(idd->idd_pdev), (unsigned long long)ns);
+ }
+
+ /* Remember results. We store the extint clock period rather
+ * than the PCI clock period so that greater precision is
+ * retained. Divide by IOC4_EXTINT_COUNT_DIVISOR to get
+ * PCI clock period.
+ */
+ idd->count_period = period;
+}
+
+/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
+ * Each brings out different combinations of IOC4 signals, thus
+ * the IOC4 subdrivers need to know to which we're attached.
+ *
+ * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
+ * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
+ * If neither is present, it's a PCI-RT.
+ */
+static unsigned int __devinit
+ioc4_variant(struct ioc4_driver_data *idd)
+{
+ struct pci_dev *pdev = NULL;
+ int found = 0;
+
+ /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
+ if (pdev &&
+ idd->idd_pdev->bus->number == pdev->bus->number &&
+ 3 == PCI_SLOT(pdev->devfn))
+ found = 1;
+ } while (pdev && !found);
+ if (NULL != pdev) {
+ pci_dev_put(pdev);
+ return IOC4_VARIANT_IO9;
+ }
+
+ /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
+ pdev = NULL;
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
+ PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
+ if (pdev &&
+ idd->idd_pdev->bus->number == pdev->bus->number &&
+ 3 == PCI_SLOT(pdev->devfn))
+ found = 1;
+ } while (pdev && !found);
+ if (NULL != pdev) {
+ pci_dev_put(pdev);
+ return IOC4_VARIANT_IO10;
+ }
+
+ /* PCI-RT: No SCSI/SATA controller will be present */
+ return IOC4_VARIANT_PCI_RT;
+}
+
+static void
+ioc4_load_modules(struct work_struct *work)
+{
+ request_module("sgiioc4");
+}
+
+static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
+
+/* Adds a new instance of an IOC4 card */
+static int __devinit
+ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+{
+ struct ioc4_driver_data *idd;
+ struct ioc4_submodule *is;
+ uint32_t pcmd;
+ int ret;
+
+ /* Enable IOC4 and take ownership of it */
+ if ((ret = pci_enable_device(pdev))) {
+ printk(KERN_WARNING
+ "%s: Failed to enable IOC4 device for pci_dev %s.\n",
+ __func__, pci_name(pdev));
+ goto out;
+ }
+ pci_set_master(pdev);
+
+ /* Set up per-IOC4 data */
+ idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
+ if (!idd) {
+ printk(KERN_WARNING
+ "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
+ __func__, pci_name(pdev));
+ ret = -ENOMEM;
+ goto out_idd;
+ }
+ idd->idd_pdev = pdev;
+ idd->idd_pci_id = pci_id;
+
+ /* Map IOC4 misc registers. These are shared between subdevices
+ * so the main IOC4 module manages them.
+ */
+ idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
+ if (!idd->idd_bar0) {
+ printk(KERN_WARNING
+ "%s: Unable to find IOC4 misc resource "
+ "for pci_dev %s.\n",
+ __func__, pci_name(idd->idd_pdev));
+ ret = -ENODEV;
+ goto out_pci;
+ }
+ if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
+ "ioc4_misc")) {
+ printk(KERN_WARNING
+ "%s: Unable to request IOC4 misc region "
+ "for pci_dev %s.\n",
+ __func__, pci_name(idd->idd_pdev));
+ ret = -ENODEV;
+ goto out_pci;
+ }
+ idd->idd_misc_regs = ioremap(idd->idd_bar0,
+ sizeof(struct ioc4_misc_regs));
+ if (!idd->idd_misc_regs) {
+ printk(KERN_WARNING
+ "%s: Unable to remap IOC4 misc region "
+ "for pci_dev %s.\n",
+ __func__, pci_name(idd->idd_pdev));
+ ret = -ENODEV;
+ goto out_misc_region;
+ }
+
+ /* Failsafe portion of per-IOC4 initialization */
+
+ /* Detect card variant */
+ idd->idd_variant = ioc4_variant(idd);
+ printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
+ idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
+ idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
+ idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
+
+ /* Initialize IOC4 */
+ pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
+ pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
+ pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+
+ /* Determine PCI clock */
+ ioc4_clock_calibrate(idd);
+
+ /* Disable/clear all interrupts. Need to do this here lest
+ * one submodule request the shared IOC4 IRQ, but interrupt
+ * is generated by a different subdevice.
+ */
+ /* Disable */
+ writel(~0, &idd->idd_misc_regs->other_iec.raw);
+ writel(~0, &idd->idd_misc_regs->sio_iec);
+ /* Clear (i.e. acknowledge) */
+ writel(~0, &idd->idd_misc_regs->other_ir.raw);
+ writel(~0, &idd->idd_misc_regs->sio_ir);
+
+ /* Track PCI-device specific data */
+ idd->idd_serial_data = NULL;
+ pci_set_drvdata(idd->idd_pdev, idd);
+
+ mutex_lock(&ioc4_mutex);
+ list_add_tail(&idd->idd_list, &ioc4_devices);
+
+ /* Add this IOC4 to all submodules */
+ list_for_each_entry(is, &ioc4_submodules, is_list) {
+ if (is->is_probe && is->is_probe(idd)) {
+ printk(KERN_WARNING
+ "%s: IOC4 submodule 0x%s probe failed "
+ "for pci_dev %s.\n",
+ __func__, module_name(is->is_owner),
+ pci_name(idd->idd_pdev));
+ }
+ }
+ mutex_unlock(&ioc4_mutex);
+
+ /* Request sgiioc4 IDE driver on boards that bring that functionality
+ * off of IOC4. The root filesystem may be hosted on a drive connected
+ * to IOC4, so we need to make sure the sgiioc4 driver is loaded as it
+ * won't be picked up by modprobes due to the ioc4 module owning the
+ * PCI device.
+ */
+ if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
+ /* Request the module from a work procedure as the modprobe
+ * goes out to a userland helper and that will hang if done
+ * directly from ioc4_probe().
+ */
+ printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
+ schedule_work(&ioc4_load_modules_work);
+ }
+
+ return 0;
+
+out_misc_region:
+ release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
+out_pci:
+ kfree(idd);
+out_idd:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+/* Removes a particular instance of an IOC4 card. */
+static void __devexit
+ioc4_remove(struct pci_dev *pdev)
+{
+ struct ioc4_submodule *is;
+ struct ioc4_driver_data *idd;
+
+ idd = pci_get_drvdata(pdev);
+
+ /* Remove this IOC4 from all submodules */
+ mutex_lock(&ioc4_mutex);
+ list_for_each_entry(is, &ioc4_submodules, is_list) {
+ if (is->is_remove && is->is_remove(idd)) {
+ printk(KERN_WARNING
+ "%s: IOC4 submodule 0x%s remove failed "
+ "for pci_dev %s.\n",
+ __func__, module_name(is->is_owner),
+ pci_name(idd->idd_pdev));
+ }
+ }
+ mutex_unlock(&ioc4_mutex);
+
+ /* Release resources */
+ iounmap(idd->idd_misc_regs);
+ if (!idd->idd_bar0) {
+ printk(KERN_WARNING
+ "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
+ "Device removal may be incomplete.\n",
+ __func__, pci_name(idd->idd_pdev));
+ }
+ release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
+
+ /* Disable IOC4 and relinquish */
+ pci_disable_device(pdev);
+
+ /* Remove and free driver data */
+ mutex_lock(&ioc4_mutex);
+ list_del(&idd->idd_list);
+ mutex_unlock(&ioc4_mutex);
+ kfree(idd);
+}
+
+static struct pci_device_id ioc4_id_table[] = {
+ {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
+ PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
+ {0}
+};
+
+static struct pci_driver ioc4_driver = {
+ .name = "IOC4",
+ .id_table = ioc4_id_table,
+ .probe = ioc4_probe,
+ .remove = __devexit_p(ioc4_remove),
+};
+
+MODULE_DEVICE_TABLE(pci, ioc4_id_table);
+
+/*********************
+ * Module management *
+ *********************/
+
+/* Module load */
+static int __init
+ioc4_init(void)
+{
+ return pci_register_driver(&ioc4_driver);
+}
+
+/* Module unload */
+static void __exit
+ioc4_exit(void)
+{
+ /* Ensure ioc4_load_modules() has completed before exiting */
+ flush_work_sync(&ioc4_load_modules_work);
+ pci_unregister_driver(&ioc4_driver);
+}
+
+module_init(ioc4_init);
+module_exit(ioc4_exit);
+
+MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
+MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(ioc4_register_submodule);
+EXPORT_SYMBOL(ioc4_unregister_submodule);
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
new file mode 100644
index 00000000..eb5de2e2
--- /dev/null
+++ b/drivers/misc/isl29003.c
@@ -0,0 +1,463 @@
+/*
+ * isl29003.c - Linux kernel module for
+ * Intersil ISL29003 ambient light sensor
+ *
+ * See file:Documentation/misc-devices/isl29003
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * Based on code written by
+ * Rodolfo Giometti <giometti@linux.it>
+ * Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define ISL29003_DRV_NAME "isl29003"
+#define DRIVER_VERSION "1.0"
+
+#define ISL29003_REG_COMMAND 0x00
+#define ISL29003_ADC_ENABLED (1 << 7)
+#define ISL29003_ADC_PD (1 << 6)
+#define ISL29003_TIMING_INT (1 << 5)
+#define ISL29003_MODE_SHIFT (2)
+#define ISL29003_MODE_MASK (0x3 << ISL29003_MODE_SHIFT)
+#define ISL29003_RES_SHIFT (0)
+#define ISL29003_RES_MASK (0x3 << ISL29003_RES_SHIFT)
+
+#define ISL29003_REG_CONTROL 0x01
+#define ISL29003_INT_FLG (1 << 5)
+#define ISL29003_RANGE_SHIFT (2)
+#define ISL29003_RANGE_MASK (0x3 << ISL29003_RANGE_SHIFT)
+#define ISL29003_INT_PERSISTS_SHIFT (0)
+#define ISL29003_INT_PERSISTS_MASK (0xf << ISL29003_INT_PERSISTS_SHIFT)
+
+#define ISL29003_REG_IRQ_THRESH_HI 0x02
+#define ISL29003_REG_IRQ_THRESH_LO 0x03
+#define ISL29003_REG_LSB_SENSOR 0x04
+#define ISL29003_REG_MSB_SENSOR 0x05
+#define ISL29003_REG_LSB_TIMER 0x06
+#define ISL29003_REG_MSB_TIMER 0x07
+
+#define ISL29003_NUM_CACHABLE_REGS 4
+
+struct isl29003_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 reg_cache[ISL29003_NUM_CACHABLE_REGS];
+ u8 power_state_before_suspend;
+};
+
+static int gain_range[] = {
+ 1000, 4000, 16000, 64000
+};
+
+/*
+ * register access helpers
+ */
+
+static int __isl29003_read_reg(struct i2c_client *client,
+ u32 reg, u8 mask, u8 shift)
+{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+ return (data->reg_cache[reg] & mask) >> shift;
+}
+
+static int __isl29003_write_reg(struct i2c_client *client,
+ u32 reg, u8 mask, u8 shift, u8 val)
+{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+ int ret = 0;
+ u8 tmp;
+
+ if (reg >= ISL29003_NUM_CACHABLE_REGS)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+
+ tmp = data->reg_cache[reg];
+ tmp &= ~mask;
+ tmp |= val << shift;
+
+ ret = i2c_smbus_write_byte_data(client, reg, tmp);
+ if (!ret)
+ data->reg_cache[reg] = tmp;
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/*
+ * internally used functions
+ */
+
+/* range */
+static int isl29003_get_range(struct i2c_client *client)
+{
+ return __isl29003_read_reg(client, ISL29003_REG_CONTROL,
+ ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT);
+}
+
+static int isl29003_set_range(struct i2c_client *client, int range)
+{
+ return __isl29003_write_reg(client, ISL29003_REG_CONTROL,
+ ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT, range);
+}
+
+/* resolution */
+static int isl29003_get_resolution(struct i2c_client *client)
+{
+ return __isl29003_read_reg(client, ISL29003_REG_COMMAND,
+ ISL29003_RES_MASK, ISL29003_RES_SHIFT);
+}
+
+static int isl29003_set_resolution(struct i2c_client *client, int res)
+{
+ return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
+ ISL29003_RES_MASK, ISL29003_RES_SHIFT, res);
+}
+
+/* mode */
+static int isl29003_get_mode(struct i2c_client *client)
+{
+ return __isl29003_read_reg(client, ISL29003_REG_COMMAND,
+ ISL29003_MODE_MASK, ISL29003_MODE_SHIFT);
+}
+
+static int isl29003_set_mode(struct i2c_client *client, int mode)
+{
+ return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
+ ISL29003_MODE_MASK, ISL29003_MODE_SHIFT, mode);
+}
+
+/* power_state */
+static int isl29003_set_power_state(struct i2c_client *client, int state)
+{
+ return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
+ ISL29003_ADC_ENABLED | ISL29003_ADC_PD, 0,
+ state ? ISL29003_ADC_ENABLED : ISL29003_ADC_PD);
+}
+
+static int isl29003_get_power_state(struct i2c_client *client)
+{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+ u8 cmdreg = data->reg_cache[ISL29003_REG_COMMAND];
+ return ~cmdreg & ISL29003_ADC_PD;
+}
+
+static int isl29003_get_adc_value(struct i2c_client *client)
+{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+ int lsb, msb, range, bitdepth;
+
+ mutex_lock(&data->lock);
+ lsb = i2c_smbus_read_byte_data(client, ISL29003_REG_LSB_SENSOR);
+
+ if (lsb < 0) {
+ mutex_unlock(&data->lock);
+ return lsb;
+ }
+
+ msb = i2c_smbus_read_byte_data(client, ISL29003_REG_MSB_SENSOR);
+ mutex_unlock(&data->lock);
+
+ if (msb < 0)
+ return msb;
+
+ range = isl29003_get_range(client);
+ bitdepth = (4 - isl29003_get_resolution(client)) * 4;
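+ /* Example with hypothetical readings: range 1 (gain_range 4000),
+ * resolution code 0 (bitdepth 16), msb = 0x20, lsb = 0x00:
+ * (0x2000 * 4000) >> 16 = 500. */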
+ return (((msb << 8) | lsb) * gain_range[range]) >> bitdepth;
+}
+
+/*
+ * sysfs layer
+ */
+
+/* range */
+static ssize_t isl29003_show_range(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%i\n", isl29003_get_range(client));
+}
+
+static ssize_t isl29003_store_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int ret;
+
+ if ((strict_strtoul(buf, 10, &val) < 0) || (val > 3))
+ return -EINVAL;
+
+ ret = isl29003_set_range(client, val);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(range, S_IWUSR | S_IRUGO,
+ isl29003_show_range, isl29003_store_range);
+
+
+/* resolution */
+static ssize_t isl29003_show_resolution(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%d\n", isl29003_get_resolution(client));
+}
+
+static ssize_t isl29003_store_resolution(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int ret;
+
+ if ((strict_strtoul(buf, 10, &val) < 0) || (val > 3))
+ return -EINVAL;
+
+ ret = isl29003_set_resolution(client, val);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(resolution, S_IWUSR | S_IRUGO,
+ isl29003_show_resolution, isl29003_store_resolution);
+
+/* mode */
+static ssize_t isl29003_show_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%d\n", isl29003_get_mode(client));
+}
+
+static ssize_t isl29003_store_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int ret;
+
+ if ((strict_strtoul(buf, 10, &val) < 0) || (val > 2))
+ return -EINVAL;
+
+ ret = isl29003_set_mode(client, val);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
+ isl29003_show_mode, isl29003_store_mode);
+
+
+/* power state */
+static ssize_t isl29003_show_power_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%d\n", isl29003_get_power_state(client));
+}
+
+static ssize_t isl29003_store_power_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int ret;
+
+ if ((strict_strtoul(buf, 10, &val) < 0) || (val > 1))
+ return -EINVAL;
+
+ ret = isl29003_set_power_state(client, val);
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+ isl29003_show_power_state, isl29003_store_power_state);
+
+
+/* lux */
+static ssize_t isl29003_show_lux(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ /* No LUX data if not operational */
+ if (!isl29003_get_power_state(client))
+ return -EBUSY;
+
+ return sprintf(buf, "%d\n", isl29003_get_adc_value(client));
+}
+
+static DEVICE_ATTR(lux, S_IRUGO, isl29003_show_lux, NULL);
+
+static struct attribute *isl29003_attributes[] = {
+ &dev_attr_range.attr,
+ &dev_attr_resolution.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_power_state.attr,
+ &dev_attr_lux.attr,
+ NULL
+};
+
+static const struct attribute_group isl29003_attr_group = {
+ .attrs = isl29003_attributes,
+};
+
+static int isl29003_init_client(struct i2c_client *client)
+{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+ int i;
+
+ /* read all the registers once to fill the cache.
+ * if one of the reads fails, we consider the init failed */
+ for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) {
+ int v = i2c_smbus_read_byte_data(client, i);
+ if (v < 0)
+ return -ENODEV;
+
+ data->reg_cache[i] = v;
+ }
+
+ /* set defaults */
+ isl29003_set_range(client, 0);
+ isl29003_set_resolution(client, 0);
+ isl29003_set_mode(client, 0);
+ isl29003_set_power_state(client, 0);
+
+ return 0;
+}
+
+/*
+ * I2C layer
+ */
+
+static int __devinit isl29003_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct isl29003_data *data;
+ int err = 0;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ data = kzalloc(sizeof(struct isl29003_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->lock);
+
+ /* initialize the ISL29003 chip */
+ err = isl29003_init_client(client);
+ if (err)
+ goto exit_kfree;
+
+ /* register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &isl29003_attr_group);
+ if (err)
+ goto exit_kfree;
+
+ dev_info(&client->dev, "driver version %s enabled\n", DRIVER_VERSION);
+ return 0;
+
+exit_kfree:
+ kfree(data);
+ return err;
+}
+
+static int __devexit isl29003_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group);
+ isl29003_set_power_state(client, 0);
+ kfree(i2c_get_clientdata(client));
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int isl29003_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+
+ data->power_state_before_suspend = isl29003_get_power_state(client);
+ return isl29003_set_power_state(client, 0);
+}
+
+static int isl29003_resume(struct i2c_client *client)
+{
+ int i;
+ struct isl29003_data *data = i2c_get_clientdata(client);
+
+ /* restore registers from cache */
+ for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++)
+ if (i2c_smbus_write_byte_data(client, i, data->reg_cache[i]))
+ return -EIO;
+
+ return isl29003_set_power_state(client,
+ data->power_state_before_suspend);
+}
+
+#else
+#define isl29003_suspend NULL
+#define isl29003_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id isl29003_id[] = {
+ { "isl29003", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, isl29003_id);
+
+static struct i2c_driver isl29003_driver = {
+ .driver = {
+ .name = ISL29003_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .suspend = isl29003_suspend,
+ .resume = isl29003_resume,
+ .probe = isl29003_probe,
+ .remove = __devexit_p(isl29003_remove),
+ .id_table = isl29003_id,
+};
+
+module_i2c_driver(isl29003_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("ISL29003 ambient light sensor driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
new file mode 100644
index 00000000..0aa08c74
--- /dev/null
+++ b/drivers/misc/isl29020.c
@@ -0,0 +1,237 @@
+/*
+ * isl29020.c - Intersil ALS Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+static DEFINE_MUTEX(mutex);
+
+static ssize_t als_sensing_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, 0x00);
+
+ if (val < 0)
+ return val;
+ return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
+
+}
+
+static ssize_t als_lux_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val, val;
+ unsigned long int lux;
+ int temp;
+
+ pm_runtime_get_sync(dev);
+ msleep(100);
+
+ mutex_lock(&mutex);
+ temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
+ if (temp < 0) {
+ pm_runtime_put_sync(dev);
+ mutex_unlock(&mutex);
+ return temp;
+ }
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
+ mutex_unlock(&mutex);
+
+ if (ret_val < 0) {
+ pm_runtime_put_sync(dev);
+ return ret_val;
+ }
+
+ ret_val |= temp << 8;
+ val = i2c_smbus_read_byte_data(client, 0x00);
+ pm_runtime_put_sync(dev);
+ if (val < 0)
+ return val;
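+ /* e.g. range bits 2 (16000 lux full scale) and a raw count of 32768
+ * give (16 * 1000 * 32768) / 65536 = 8000 lux */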
+ lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536;
+ return sprintf(buf, "%ld\n", lux);
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val < 1 || val > 64000)
+ return -EINVAL;
+
+ /* Pick the smallest sensor range that will meet our requirements */
+ if (val <= 1000)
+ val = 1;
+ else if (val <= 4000)
+ val = 2;
+ else if (val <= 16000)
+ val = 3;
+ else
+ val = 4;
+
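+ /* val now holds range code + 1; codes 0-3 select full-scale ranges
+ * of 1000/4000/16000/64000 lux */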
+ ret_val = i2c_smbus_read_byte_data(client, 0x00);
+ if (ret_val < 0)
+ return ret_val;
+
+ ret_val &= 0xFC; /* reset the range bits before setting them */
+ ret_val |= val - 1;
+ ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
+
+ if (ret_val < 0)
+ return ret_val;
+ return count;
+}
+
+static void als_set_power_state(struct i2c_client *client, int enable)
+{
+ int ret_val;
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x00);
+ if (ret_val < 0)
+ return;
+
+ if (enable)
+ ret_val |= 0x80;
+ else
+ ret_val &= 0x7F;
+
+ i2c_smbus_write_byte_data(client, 0x00, ret_val);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static struct attribute_group m_als_gr = {
+ .name = "isl29020",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int retval;
+
+ retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
+ if (retval < 0) {
+ dev_err(&client->dev, "default write failed.");
+ return retval;
+ }
+ return 0;
+}
+
+static int isl29020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+
+ res = als_set_default_config(client);
+ if (res < 0)
+ return res;
+
+ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+ if (res) {
+ dev_err(&client->dev, "isl29020: device create file failed\n");
+ return res;
+ }
+ dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
+ als_set_power_state(client, 0);
+ pm_runtime_enable(&client->dev);
+ return res;
+}
+
+static int isl29020_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+ return 0;
+}
+
+static struct i2c_device_id isl29020_id[] = {
+ { "isl29020", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29020_id);
+
+#ifdef CONFIG_PM
+
+static int isl29020_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ als_set_power_state(client, 0);
+ return 0;
+}
+
+static int isl29020_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ als_set_power_state(client, 1);
+ return 0;
+}
+
+static const struct dev_pm_ops isl29020_pm_ops = {
+ .runtime_suspend = isl29020_runtime_suspend,
+ .runtime_resume = isl29020_runtime_resume,
+};
+
+#define ISL29020_PM_OPS (&isl29020_pm_ops)
+#else /* CONFIG_PM */
+#define ISL29020_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_driver isl29020_driver = {
+ .driver = {
+ .name = "isl29020",
+ .pm = ISL29020_PM_OPS,
+ },
+ .probe = isl29020_probe,
+ .remove = isl29020_remove,
+ .id_table = isl29020_id,
+};
+
+module_i2c_driver(isl29020_driver);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
+MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 00000000..9e4b88fb
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
+config IWMC3200TOP
+ tristate "Intel Wireless MultiCom Top Driver"
+ depends on MMC && EXPERIMENTAL
+ select FW_LOADER
+ ---help---
+ The Intel Wireless MultiCom 3200 Top driver is responsible for
+ firmware loading and enumeration of the enabled COMs.
+
+config IWMC3200TOP_DEBUG
+ bool "Enable full debug output of iwmc3200top Driver"
+ depends on IWMC3200TOP
+ ---help---
+ Enable full debug output of iwmc3200top Driver
+
+config IWMC3200TOP_DEBUGFS
+ bool "Enable Debugfs debugging interface for iwmc3200top"
+ depends on IWMC3200TOP
+ ---help---
+ Enable creation of debugfs files for iwmc3200top
+
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 00000000..fbf53fb4
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
+# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+# drivers/misc/iwmc3200top/Makefile
+#
+# Copyright (C) 2009 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License version
+# 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+#
+# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+# -
+#
+#
+
+obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
+iwmc3200top-objs := main.o fw-download.o
+iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
+iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 00000000..62fbaec4
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,137 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/debugfs.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+#include <linux/debugfs.h>
+
+#include "iwmc3200top.h"
+#include "fw-msg.h"
+#include "log.h"
+#include "debugfs.h"
+
+
+
+/* Constants definition */
+#define HEXADECIMAL_RADIX 16
+
+/* Functions definition */
+
+
+#define DEBUGFS_ADD(name, parent) do { \
+ dbgfs->dbgfs_##parent##_files.file_##name = \
+ debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
+ &iwmct_dbgfs_##name##_ops); \
+} while (0)
+
+#define DEBUGFS_RM(name) do { \
+ debugfs_remove(name); \
+ name = NULL; \
+} while (0)
+
+#define DEBUGFS_READ_FUNC(name) \
+ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name) \
+ static const struct file_operations iwmct_dbgfs_##name##_ops = { \
+ .read = iwmct_dbgfs_##name##_read, \
+ .open = iwmct_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+ };
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name) \
+ static const struct file_operations iwmct_dbgfs_##name##_ops = { \
+ .write = iwmct_dbgfs_##name##_write, \
+ .open = iwmct_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+ };
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name) \
+ DEBUGFS_WRITE_FUNC(name) \
+ static const struct file_operations iwmct_dbgfs_##name##_ops = {\
+ .write = iwmct_dbgfs_##name##_write, \
+ .read = iwmct_dbgfs_##name##_read, \
+ .open = iwmct_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+ };
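+/*
+ * e.g. DEBUGFS_READ_FILE_OPS(status) would declare
+ * iwmct_dbgfs_status_read() and define iwmct_dbgfs_status_ops, ready to
+ * be wired up with DEBUGFS_ADD(status, drv); "status" here is a
+ * hypothetical file name.
+ */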
+
+
+/* Debugfs file ops definitions */
+
+/*
+ * Create the debugfs files and directories
+ *
+ */
+void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
+{
+ struct iwmct_debugfs *dbgfs;
+
+ dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
+ if (!dbgfs) {
+ LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
+ sizeof(struct iwmct_debugfs));
+ return;
+ }
+
+ priv->dbgfs = dbgfs;
+ dbgfs->name = name;
+ dbgfs->dir_drv = debugfs_create_dir(name, NULL);
+ if (!dbgfs->dir_drv) {
+ LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
+ return;
+ }
+
+ return;
+}
+
+/*
+ * Remove the debugfs files and directories
+ *
+ */
+void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
+{
+ if (!dbgfs)
+ return;
+
+ DEBUGFS_RM(dbgfs->dir_drv);
+ kfree(dbgfs);
+ dbgfs = NULL;
+}
+
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 00000000..71d45759
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/debugfs.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#ifndef __DEBUGFS_H__
+#define __DEBUGFS_H__
+
+
+#ifdef CONFIG_IWMC3200TOP_DEBUGFS
+
+struct iwmct_debugfs {
+ const char *name;
+ struct dentry *dir_drv;
+ struct dir_drv_files {
+ } dbgfs_drv_files;
+};
+
+void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
+void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
+
+#else /* CONFIG_IWMC3200TOP_DEBUGFS */
+
+struct iwmct_debugfs;
+
+static inline void
+iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
+{}
+
+static inline void
+iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
+{}
+
+#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
+
+#endif /* __DEBUGFS_H__ */
+
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 00000000..e27afde6
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,358 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/fw-download.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "iwmc3200top.h"
+#include "log.h"
+#include "fw-msg.h"
+
+#define CHECKSUM_BYTES_NUM sizeof(u32)
+
+/*
+ * Initialize the parser struct with the firmware file.
+ */
+static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
+ size_t file_size, size_t block_size)
+{
+ struct iwmct_parser *parser = &priv->parser;
+ struct iwmct_fw_hdr *fw_hdr = &parser->versions;
+
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
+
+ LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
+
+ parser->file = file;
+ parser->file_size = file_size;
+ parser->cur_pos = 0;
+ parser->entry_point = 0;
+ parser->buf = kzalloc(block_size, GFP_KERNEL);
+ if (!parser->buf) {
+ LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
+ return -ENOMEM;
+ }
+ parser->buf_size = block_size;
+
+ /* extract fw versions */
+ memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
+ LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
+ "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
+ fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
+ fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
+ fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
+ fw_hdr->tic_name);
+
+ parser->cur_pos += sizeof(struct iwmct_fw_hdr);
+
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
+ return 0;
+}
+
+static bool iwmct_checksum(struct iwmct_priv *priv)
+{
+ struct iwmct_parser *parser = &priv->parser;
+ __le32 *file = (__le32 *)parser->file;
+ int i, pad, steps;
+ u32 accum = 0;
+ u32 checksum;
+ u32 mask = 0xffffffff;
+
+ pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
+ steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
+
+ LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
+
+ for (i = 0; i < steps; i++)
+ accum += le32_to_cpu(file[i]);
+
+ if (pad) {
+ mask <<= 8 * (4 - pad);
+ accum += le32_to_cpu(file[steps]) & mask;
+ }
+
+ checksum = get_unaligned_le32((__le32 *)(parser->file +
+ parser->file_size - CHECKSUM_BYTES_NUM));
+
+ LOG_INFO(priv, FW_DOWNLOAD,
+ "compare checksum accum=0x%x to checksum=0x%x\n",
+ accum, checksum);
+
+ return checksum == accum;
+}
+
+static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
+ size_t *sec_size, __le32 *sec_addr)
+{
+ struct iwmct_parser *parser = &priv->parser;
+ struct iwmct_dbg *dbg = &priv->dbg;
+ struct iwmct_fw_sec_hdr *sec_hdr;
+
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
+
+ while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
+ <= parser->file_size) {
+
+ sec_hdr = (struct iwmct_fw_sec_hdr *)
+ (parser->file + parser->cur_pos);
+ parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
+
+ LOG_INFO(priv, FW_DOWNLOAD,
+ "sec hdr: type=%s addr=0x%x size=%d\n",
+ sec_hdr->type, sec_hdr->target_addr,
+ sec_hdr->data_size);
+
+ if (strcmp(sec_hdr->type, "ENT") == 0)
+ parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
+ else if (strcmp(sec_hdr->type, "LBL") == 0)
+ strcpy(dbg->label_fw, parser->file + parser->cur_pos);
+ else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
+ (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
+ ((strcmp(sec_hdr->type, "GPS") == 0) &&
+ (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
+ ((strcmp(sec_hdr->type, "BTH") == 0) &&
+ (priv->barker & BARKER_DNLOAD_BT_MSK))) {
+ *sec_addr = sec_hdr->target_addr;
+ *sec_size = le32_to_cpu(sec_hdr->data_size);
+ *p_sec = parser->file + parser->cur_pos;
+ parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
+ return 1;
+ } else if (strcmp(sec_hdr->type, "LOG") != 0)
+ LOG_WARNING(priv, FW_DOWNLOAD,
+ "skipping section type %s\n",
+ sec_hdr->type);
+
+ parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
+ LOG_INFO(priv, FW_DOWNLOAD,
+ "finished with section cur_pos=%zd\n", parser->cur_pos);
+ }
+
+	LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
+ return 0;
+}
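+
+/*
+ * For reference, the image layout implied by the parsing code above
+ * (derived from this file and the struct definitions in iwmc3200top.h,
+ * not from a published spec):
+ *
+ *	[struct iwmct_fw_hdr - component versions]
+ *	[struct iwmct_fw_sec_hdr - type[4] | data_size | target_addr]
+ *	[data_size bytes of section payload]
+ *	... more sections ("ENT", "LBL", "TOP", "GPS", "BTH", "LOG")
+ *	[u32 checksum over everything above]
+ */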
+
+static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
+ size_t sec_size, __le32 addr)
+{
+ struct iwmct_parser *parser = &priv->parser;
+ struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
+ const u8 *cur_block = p_sec;
+ size_t sent = 0;
+ int cnt = 0;
+ int ret = 0;
+ u32 cmd = 0;
+
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
+ LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
+ addr, sec_size);
+
+ while (sent < sec_size) {
+ int i;
+ u32 chksm = 0;
+ u32 reset = atomic_read(&priv->reset);
+ /* actual FW data */
+ u32 data_size = min(parser->buf_size - sizeof(*hdr),
+ sec_size - sent);
+ /* Pad to block size */
+ u32 trans_size = (data_size + sizeof(*hdr) +
+ IWMC_SDIO_BLK_SIZE - 1) &
+ ~(IWMC_SDIO_BLK_SIZE - 1);
+ ++cnt;
+
+		/* in case of a reset, interrupt the FW download */
+ if (reset) {
+ LOG_INFO(priv, FW_DOWNLOAD,
+ "Reset detected. Abort FW download!!!");
+ ret = -ECANCELED;
+ goto exit;
+ }
+
+ memset(parser->buf, 0, parser->buf_size);
+ cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
+ cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
+ cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
+ cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
+ hdr->data_size = cpu_to_le32(data_size);
+ hdr->target_addr = addr;
+
+ /* checksum is allowed for sizes divisible by 4 */
+ if (data_size & 0x3)
+ cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
+
+ memcpy(hdr->data, cur_block, data_size);
+
+
+ if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
+
+ chksm = data_size + le32_to_cpu(addr) + cmd;
+ for (i = 0; i < data_size >> 2; i++)
+ chksm += ((u32 *)cur_block)[i];
+
+ hdr->block_chksm = cpu_to_le32(chksm);
+ LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
+					chksm);
+ }
+
+ LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
+ "sec_size=%zd, startAddress 0x%X\n",
+ cnt, trans_size, sent, sec_size, addr);
+
+ if (priv->dbg.dump)
+ LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
+
+
+ hdr->cmd = cpu_to_le32(cmd);
+ /* send it down */
+ /* TODO: add more proper sending and error checking */
+ ret = iwmct_tx(priv, parser->buf, trans_size);
+ if (ret != 0) {
+ LOG_INFO(priv, FW_DOWNLOAD,
+ "iwmct_tx returned %d\n", ret);
+ goto exit;
+ }
+
+ addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
+ sent += data_size;
+ cur_block = p_sec + sent;
+
+ if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
+ LOG_INFO(priv, FW_DOWNLOAD,
+ "Block number limit is reached [%d]\n",
+ priv->dbg.blocks);
+ break;
+ }
+ }
+
+ if (sent < sec_size)
+ ret = -EINVAL;
+exit:
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
+ return ret;
+}
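+
+/*
+ * Wire format of one download transaction, as built above (derived
+ * from this function; IWMC_SDIO_BLK_SIZE is 256):
+ *
+ *	[struct iwmct_fw_load_hdr - cmd | target_addr | data_size | block_chksm]
+ *	[data_size bytes of section data]
+ *	[zero padding up to a multiple of IWMC_SDIO_BLK_SIZE]
+ *
+ * When checksumming is enabled, block_chksm is data_size + target_addr
+ * + cmd plus the sum of the payload taken as 32-bit words.
+ */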
+
+static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
+{
+ struct iwmct_parser *parser = &priv->parser;
+ struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
+ int ret;
+ u32 cmd;
+
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
+
+ memset(parser->buf, 0, parser->buf_size);
+ cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
+ if (jump) {
+ cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
+ hdr->target_addr = cpu_to_le32(parser->entry_point);
+ LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
+ parser->entry_point);
+ } else {
+ cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
+ LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
+ }
+
+ hdr->cmd = cpu_to_le32(cmd);
+
+ LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
+ /* send it down */
+ /* TODO: add more proper sending and error checking */
+	ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
+	if (ret)
+		LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d\n", ret);
+
+	LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
+	return ret;
+}
+
+int iwmct_fw_load(struct iwmct_priv *priv)
+{
+ const u8 *fw_name = FW_NAME(FW_API_VER);
+ const struct firmware *raw;
+ const u8 *pdata;
+ size_t len;
+ __le32 addr;
+ int ret;
+
+
+ LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
+ priv->barker);
+ LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
+
+
+ /* get the firmware */
+ ret = request_firmware(&raw, fw_name, &priv->func->dev);
+ if (ret < 0) {
+ LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
+ fw_name, ret);
+ goto exit;
+ }
+
+	if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller than (%zd) (%zd)\n",
+			fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+ LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
+
+ /* clear parser struct */
+ ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
+ if (ret < 0) {
+ LOG_ERROR(priv, FW_DOWNLOAD,
+ "iwmct_parser_init failed: Reason %d\n", ret);
+ goto exit;
+ }
+
+ if (!iwmct_checksum(priv)) {
+ LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* download firmware to device */
+ while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
+ ret = iwmct_download_section(priv, pdata, len, addr);
+ if (ret) {
+ LOG_ERROR(priv, FW_DOWNLOAD,
+ "%s download section failed\n", fw_name);
+ goto exit;
+ }
+ }
+
+ ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
+
+exit:
+	kfree(priv->parser.buf);
+	priv->parser.buf = NULL;	/* avoid double free on a later early exit */
+ release_firmware(raw);
+ return ret;
+}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 00000000..9e26b75b
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/fw-msg.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#ifndef __FWMSG_H__
+#define __FWMSG_H__
+
+#define COMM_TYPE_D2H 0xFF
+#define COMM_TYPE_H2D 0xEE
+
+#define COMM_CATEGORY_OPERATIONAL 0x00
+#define COMM_CATEGORY_DEBUG 0x01
+#define COMM_CATEGORY_TESTABILITY 0x02
+#define COMM_CATEGORY_DIAGNOSTICS 0x03
+
+#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
+
+#define FW_LOG_SRC_MAX 32
+#define FW_LOG_SRC_ALL 255
+
+#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
+
+#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
+#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
+#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
+#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
+#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
+#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
+#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
+#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
+#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
+#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
+
+#define OP_OPR_ALIVE cpu_to_le16(0x0010)
+#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
+#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
+#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
+
+#define CMD_FLAG_PADDING_256 0x80
+
+#define FW_HCMD_BLOCK_SIZE 256
+
+struct msg_hdr {
+ u8 type;
+ u8 category;
+ __le16 opcode;
+ u8 seqnum;
+ u8 flags;
+ __le16 length;
+} __attribute__((__packed__));
+
+struct log_hdr {
+ __le32 timestamp;
+ u8 severity;
+ u8 logsource;
+ __le16 reserved;
+} __attribute__((__packed__));
+
+struct mdump_hdr {
+ u8 dmpid;
+ u8 frag;
+ __le16 size;
+ __le32 addr;
+} __attribute__((__packed__));
+
+struct top_msg {
+ struct msg_hdr hdr;
+ union {
+ /* D2H messages */
+ struct {
+ struct log_hdr log_hdr;
+ u8 data[1];
+ } __attribute__((__packed__)) log;
+
+ struct {
+ struct log_hdr log_hdr;
+ struct mdump_hdr md_hdr;
+ u8 data[1];
+ } __attribute__((__packed__)) mdump;
+
+ /* H2D messages */
+ struct {
+ u8 logsource;
+ u8 sevmask;
+ } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
+ struct mdump_hdr mdump_req;
+ } u;
+} __attribute__((__packed__));
+
+
+#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 00000000..620973ed
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,205 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/iwmc3200top.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#ifndef __IWMC3200TOP_H__
+#define __IWMC3200TOP_H__
+
+#include <linux/workqueue.h>
+
+#define DRV_NAME "iwmc3200top"
+#define FW_API_VER 1
+#define _FW_NAME(api) DRV_NAME "." #api ".fw"
+#define FW_NAME(api) _FW_NAME(api)
+
+#define IWMC_SDIO_BLK_SIZE 256
+#define IWMC_DEFAULT_TR_BLK 64
+#define IWMC_SDIO_DATA_ADDR 0x0
+#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
+#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
+#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
+#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
+
+#define COMM_HUB_HEADER_LENGTH 16
+#define LOGGER_HEADER_LENGTH 10
+
+
+#define BARKER_DNLOAD_BT_POS 0
+#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
+#define BARKER_DNLOAD_GPS_POS 1
+#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
+#define BARKER_DNLOAD_TOP_POS 2
+#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
+#define BARKER_DNLOAD_RESERVED1_POS 3
+#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
+#define BARKER_DNLOAD_JUMP_POS 4
+#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
+#define BARKER_DNLOAD_SYNC_POS 5
+#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
+#define BARKER_DNLOAD_RESERVED2_POS 6
+#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
+#define BARKER_DNLOAD_BARKER_POS 8
+#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
+
+#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
+/* whole field barker */
+#define IWMC_BARKER_ACK 0xfeedbabe
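+
+/*
+ * Example (illustrative): a reboot barker of 0xdeadbe15 carries the
+ * barker field 0xdeadbe with the BT (bit 0), TOP (bit 2) and JUMP
+ * (bit 4) download-request bits set.
+ */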
+
+#define IWMC_CMD_SIGNATURE 0xcbbc
+
+#define CMD_HDR_OPCODE_POS 0
+#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
+#define CMD_HDR_RESPONSE_CODE_POS 4
+#define CMD_HDR_RESPONSE_CODE_MSK (0xf << CMD_HDR_RESPONSE_CODE_POS)
+#define CMD_HDR_USE_CHECKSUM_POS 8
+#define CMD_HDR_USE_CHECKSUM_MSK BIT(CMD_HDR_USE_CHECKSUM_POS)
+#define CMD_HDR_RESPONSE_REQUIRED_POS 9
+#define CMD_HDR_RESPONSE_REQUIRED_MSK BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
+#define CMD_HDR_DIRECT_ACCESS_POS 10
+#define CMD_HDR_DIRECT_ACCESS_MSK BIT(CMD_HDR_DIRECT_ACCESS_POS)
+#define CMD_HDR_RESERVED_POS 11
+#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
+#define CMD_HDR_SIGNATURE_POS 16
+#define CMD_HDR_SIGNATURE_MSK		(0xffff << CMD_HDR_SIGNATURE_POS)
+
+enum {
+ IWMC_OPCODE_PING = 0,
+ IWMC_OPCODE_READ = 1,
+ IWMC_OPCODE_WRITE = 2,
+ IWMC_OPCODE_JUMP = 3,
+ IWMC_OPCODE_REBOOT = 4,
+ IWMC_OPCODE_PERSISTENT_WRITE = 5,
+ IWMC_OPCODE_PERSISTENT_READ = 6,
+ IWMC_OPCODE_READ_MODIFY_WRITE = 7,
+ IWMC_OPCODE_LAST_COMMAND = 15
+};
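+
+/*
+ * Example (mirroring iwmct_download_section() in fw-download.c): a
+ * WRITE command word is assembled from the fields above before being
+ * stored in iwmct_fw_load_hdr.cmd; the checksum bit is set only when
+ * the payload size is a multiple of 4:
+ *
+ *	u32 cmd = (IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS) |
+ *		  (IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS) |
+ *		  CMD_HDR_USE_CHECKSUM_MSK;
+ */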
+
+struct iwmct_fw_load_hdr {
+ __le32 cmd;
+ __le32 target_addr;
+ __le32 data_size;
+ __le32 block_chksm;
+ u8 data[0];
+};
+
+/**
+ * struct iwmct_fw_hdr
+ * holds the versions of all SW components
+ */
+struct iwmct_fw_hdr {
+ u8 top_major;
+ u8 top_minor;
+ u8 top_revision;
+ u8 gps_major;
+ u8 gps_minor;
+ u8 gps_revision;
+ u8 bt_major;
+ u8 bt_minor;
+ u8 bt_revision;
+ u8 tic_name[31];
+};
+
+/**
+ * struct iwmct_fw_sec_hdr
+ * @type: function type
+ * @data_size: section's data size
+ * @target_addr: download address
+ */
+struct iwmct_fw_sec_hdr {
+ u8 type[4];
+ __le32 data_size;
+ __le32 target_addr;
+};
+
+/**
+ * struct iwmct_parser
+ * @file: fw image
+ * @file_size: fw size
+ * @cur_pos: position in file
+ * @buf: temp buf for download
+ * @buf_size: size of buf
+ * @entry_point: address to jump to at fw kick-off
+ */
+struct iwmct_parser {
+ const u8 *file;
+ size_t file_size;
+ size_t cur_pos;
+ u8 *buf;
+ size_t buf_size;
+ u32 entry_point;
+ struct iwmct_fw_hdr versions;
+};
+
+
+struct iwmct_work_struct {
+ struct list_head list;
+ ssize_t iosize;
+};
+
+struct iwmct_dbg {
+ int blocks;
+ bool dump;
+ bool jump;
+ bool direct;
+ bool checksum;
+ bool fw_download;
+ int block_size;
+ int download_trans_blks;
+
+ char label_fw[256];
+};
+
+struct iwmct_debugfs;
+
+struct iwmct_priv {
+ struct sdio_func *func;
+ struct iwmct_debugfs *dbgfs;
+ struct iwmct_parser parser;
+ atomic_t reset;
+ atomic_t dev_sync;
+ u32 trans_len;
+ u32 barker;
+ struct iwmct_dbg dbg;
+
+ /* drivers work items */
+ struct work_struct bus_rescan_worker;
+ struct work_struct isr_worker;
+
+ /* drivers wait queue */
+ wait_queue_head_t wait_q;
+
+ /* rx request list */
+ struct list_head read_req_list;
+};
+
+extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
+extern int iwmct_fw_load(struct iwmct_priv *priv);
+
+extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
+extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
+extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
+extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
+
+#endif /* __IWMC3200TOP_H__ */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 00000000..a36a55a4
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,348 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/log.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include "fw-msg.h"
+#include "iwmc3200top.h"
+#include "log.h"
+
+/* Maximal hexadecimal string size of the FW memdump message */
+#define LOG_MSG_SIZE_MAX 12400
+
+/* iwmct_logdefs is a global used by log macros */
+u8 iwmct_logdefs[LOG_SRC_MAX];
+static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
+
+
+static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
+{
+ int i;
+
+ if (src < size)
+ logdefs[src] = logmask;
+ else if (src == LOG_SRC_ALL)
+ for (i = 0; i < size; i++)
+ logdefs[i] = logmask;
+ else
+ return -1;
+
+ return 0;
+}
+
+
+int iwmct_log_set_filter(u8 src, u8 logmask)
+{
+ return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
+}
+
+
+int iwmct_log_set_fw_filter(u8 src, u8 logmask)
+{
+ return _log_set_log_filter(iwmct_fw_logdefs,
+ FW_LOG_SRC_MAX, src, logmask);
+}
+
+
+static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
+ int ilen, char *pref)
+{
+ int pos = 0;
+ int i;
+ int len;
+
+ for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
+ str[pos] = pref[i];
+
+ for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
+ len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
+
+ if (i < ilen)
+ return -1;
+
+ return 0;
+}
+
+/* NOTE: This function is not thread safe.
+ Currently it's called only from sdio rx worker - no race there
+*/
+void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
+{
+ struct top_msg *msg;
+ static char logbuf[LOG_MSG_SIZE_MAX];
+
+ msg = (struct top_msg *)buf;
+
+ if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
+ LOG_ERROR(priv, FW_MSG, "Log message from TOP "
+ "is too short %d (expected %zd)\n",
+ len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
+ return;
+ }
+
+ if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
+ BIT(msg->u.log.log_hdr.severity)) ||
+ !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
+ return;
+
+ switch (msg->hdr.category) {
+ case COMM_CATEGORY_TESTABILITY:
+ if (!(iwmct_logdefs[LOG_SRC_TST] &
+ BIT(msg->u.log.log_hdr.severity)))
+ return;
+ if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
+ le16_to_cpu(msg->hdr.length) +
+ sizeof(msg->hdr), "<TST>"))
+ LOG_WARNING(priv, TST,
+ "TOP TST message is too long, truncating...");
+ LOG_WARNING(priv, TST, "%s\n", logbuf);
+ break;
+ case COMM_CATEGORY_DEBUG:
+ if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
+ LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
+ ((u8 *)msg) + sizeof(msg->hdr)
+ + sizeof(msg->u.log.log_hdr));
+ else {
+ if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
+ le16_to_cpu(msg->hdr.length)
+ + sizeof(msg->hdr),
+ "<DBG>"))
+				LOG_WARNING(priv, FW_MSG,
+					"TOP DBG message is too long, "
+					"truncating...");
+ LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
+{
+ int i, pos, len;
+ for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
+ len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
+ i, logdefs[i]);
+ pos += len;
+ }
+ buf[pos-1] = '\n';
+ buf[pos] = '\0';
+
+ if (i < logdefsz)
+ return -1;
+ return 0;
+}
+
+int log_get_filter_str(char *buf, int size)
+{
+ return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
+}
+
+int log_get_fw_filter_str(char *buf, int size)
+{
+ return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
+}
+
+#define HEXADECIMAL_RADIX 16
+#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
+
+ssize_t show_iwmct_log_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwmct_priv *priv = dev_get_drvdata(d);
+ char *str_buf;
+ int buf_size;
+ ssize_t ret;
+
+ buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
+ str_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!str_buf) {
+ LOG_ERROR(priv, DEBUGFS,
+ "failed to allocate %d bytes\n", buf_size);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (log_get_filter_str(str_buf, buf_size) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = sprintf(buf, "%s", str_buf);
+
+exit:
+ kfree(str_buf);
+ return ret;
+}
+
+ssize_t store_iwmct_log_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iwmct_priv *priv = dev_get_drvdata(d);
+ char *token, *str_buf = NULL;
+ long val;
+ ssize_t ret = count;
+ u8 src, mask;
+
+ if (!count)
+ goto exit;
+
+	str_buf = kzalloc(count + 1, GFP_KERNEL);	/* +1: NUL-terminate for strsep() */
+ if (!str_buf) {
+ LOG_ERROR(priv, DEBUGFS,
+ "failed to allocate %zd bytes\n", count);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(str_buf, buf, count);
+
+ while ((token = strsep(&str_buf, ",")) != NULL) {
+ while (isspace(*token))
+ ++token;
+ if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
+ LOG_ERROR(priv, DEBUGFS,
+ "failed to convert string to long %s\n",
+ token);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ mask = val & 0xFF;
+ src = (val & 0XFF00) >> 8;
+ iwmct_log_set_filter(src, mask);
+ }
+
+exit:
+ kfree(str_buf);
+ return ret;
+}
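+
+/*
+ * Usage sketch (the exact sysfs path depends on the SDIO topology and
+ * is an assumption here): each comma-separated token is a hex word
+ * 0xSSMM, where SS is the log source and MM the severity bitmask, e.g.
+ *
+ *	echo 0x03FF > /sys/bus/sdio/devices/<dev>/log_level
+ *
+ * enables all severities for source 3 (LOG_SRC_FW_MSG).
+ */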
+
+ssize_t show_iwmct_log_level_fw(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwmct_priv *priv = dev_get_drvdata(d);
+ char *str_buf;
+ int buf_size;
+ ssize_t ret;
+
+ buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
+
+ str_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!str_buf) {
+ LOG_ERROR(priv, DEBUGFS,
+ "failed to allocate %d bytes\n", buf_size);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = sprintf(buf, "%s", str_buf);
+
+exit:
+ kfree(str_buf);
+ return ret;
+}
+
+ssize_t store_iwmct_log_level_fw(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iwmct_priv *priv = dev_get_drvdata(d);
+ struct top_msg cmd;
+ char *token, *str_buf = NULL;
+ ssize_t ret = count;
+ u16 cmdlen = 0;
+ int i;
+ long val;
+ u8 src, mask;
+
+ if (!count)
+ goto exit;
+
+	str_buf = kzalloc(count + 1, GFP_KERNEL);	/* +1: NUL-terminate for strsep() */
+ if (!str_buf) {
+ LOG_ERROR(priv, DEBUGFS,
+ "failed to allocate %zd bytes\n", count);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(str_buf, buf, count);
+
+ cmd.hdr.type = COMM_TYPE_H2D;
+ cmd.hdr.category = COMM_CATEGORY_DEBUG;
+ cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
+
+ for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
+ (i < FW_LOG_SRC_MAX); i++) {
+
+ while (isspace(*token))
+ ++token;
+
+ if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
+ LOG_ERROR(priv, DEBUGFS,
+ "failed to convert string to long %s\n",
+ token);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ mask = val & 0xFF; /* LSB */
+ src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
+ iwmct_log_set_fw_filter(src, mask);
+
+ cmd.u.logdefs[i].logsource = src;
+ cmd.u.logdefs[i].sevmask = mask;
+ }
+
+ cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
+ cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
+
+ ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
+	if (ret) {
+		LOG_ERROR(priv, DEBUGFS,
+			"Failed to send %d bytes of fwcmd, ret=%zd\n",
+			cmdlen, ret);
+		goto exit;
+	}
+	LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
+
+ ret = count;
+
+exit:
+ kfree(str_buf);
+ return ret;
+}
+
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 00000000..4434bb16
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,171 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/log.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#ifndef __LOG_H__
+#define __LOG_H__
+
+
+/* log severity:
+ * The log levels here match FW log levels
+ * so values need to stay as is */
+#define LOG_SEV_CRITICAL 0
+#define LOG_SEV_ERROR 1
+#define LOG_SEV_WARNING 2
+#define LOG_SEV_INFO 3
+#define LOG_SEV_INFOEX 4
+
+/* Log levels not defined for FW */
+#define LOG_SEV_TRACE 5
+#define LOG_SEV_DUMP 6
+
+#define LOG_SEV_FW_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
+ BIT(LOG_SEV_INFOEX))
+
+#define LOG_SEV_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
+ BIT(LOG_SEV_INFOEX) | \
+ BIT(LOG_SEV_TRACE) | \
+ BIT(LOG_SEV_DUMP))
+
+/* log source */
+#define LOG_SRC_INIT 0
+#define LOG_SRC_DEBUGFS 1
+#define LOG_SRC_FW_DOWNLOAD 2
+#define LOG_SRC_FW_MSG 3
+#define LOG_SRC_TST 4
+#define LOG_SRC_IRQ 5
+
+#define LOG_SRC_MAX 6
+#define LOG_SRC_ALL 0xFF
+
+/**
+ * Default initialization runtime log level
+ */
+#ifndef LOG_SEV_FILTER_RUNTIME
+#define LOG_SEV_FILTER_RUNTIME \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING))
+#endif
+
+#ifndef FW_LOG_SEV_FILTER_RUNTIME
+#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
+#endif
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+/**
+ * Log macros
+ */
+
+#define priv2dev(priv) (&(priv->func)->dev)
+
+#define LOG_CRITICAL(priv, src, fmt, args...) \
+do { \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
+ dev_crit(priv2dev(priv), "%s %d: " fmt, \
+ __func__, __LINE__, ##args); \
+} while (0)
+
+#define LOG_ERROR(priv, src, fmt, args...) \
+do { \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
+ dev_err(priv2dev(priv), "%s %d: " fmt, \
+ __func__, __LINE__, ##args); \
+} while (0)
+
+#define LOG_WARNING(priv, src, fmt, args...) \
+do { \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
+ dev_warn(priv2dev(priv), "%s %d: " fmt, \
+ __func__, __LINE__, ##args); \
+} while (0)
+
+#define LOG_INFO(priv, src, fmt, args...) \
+do { \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
+ dev_info(priv2dev(priv), "%s %d: " fmt, \
+ __func__, __LINE__, ##args); \
+} while (0)
+
+#define LOG_TRACE(priv, src, fmt, args...) \
+do { \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
+ dev_dbg(priv2dev(priv), "%s %d: " fmt, \
+ __func__, __LINE__, ##args); \
+} while (0)
+
+#define LOG_HEXDUMP(src, ptr, len) \
+do { \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
+ 16, 1, ptr, len, false); \
+} while (0)
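+
+/*
+ * Example use (as seen throughout the driver):
+ *
+ *	LOG_INFO(priv, FW_DOWNLOAD, "sent %zd bytes\n", sent);
+ *
+ * The source token (FW_DOWNLOAD) is pasted onto LOG_SRC_ and checked
+ * against the runtime filter in iwmct_logdefs[] before printing.
+ */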
+
+void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
+
+extern u8 iwmct_logdefs[];
+
+int iwmct_log_set_filter(u8 src, u8 logmask);
+int iwmct_log_set_fw_filter(u8 src, u8 logmask);
+
+ssize_t show_iwmct_log_level(struct device *d,
+ struct device_attribute *attr, char *buf);
+ssize_t store_iwmct_log_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t show_iwmct_log_level_fw(struct device *d,
+ struct device_attribute *attr, char *buf);
+ssize_t store_iwmct_log_level_fw(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#else
+
+#define LOG_CRITICAL(priv, src, fmt, args...)
+#define LOG_ERROR(priv, src, fmt, args...)
+#define LOG_WARNING(priv, src, fmt, args...)
+#define LOG_INFO(priv, src, fmt, args...)
+#define LOG_TRACE(priv, src, fmt, args...)
+#define LOG_HEXDUMP(src, ptr, len)
+
+static inline void iwmct_log_top_message(struct iwmct_priv *priv,
+ u8 *buf, int len) {}
+static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
+static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
+
+#endif /* CONFIG_IWMC3200TOP_DEBUG */
+
+int log_get_filter_str(char *buf, int size);
+int log_get_fw_filter_str(char *buf, int size);
+
+#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 00000000..701eb600
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,662 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/main.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ * -
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+
+#include "iwmc3200top.h"
+#include "log.h"
+#include "fw-msg.h"
+#include "debugfs.h"
+
+
+#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
+#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
+
+#define DRIVER_VERSION "0.1.62"
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRIVER_COPYRIGHT);
+MODULE_FIRMWARE(FW_NAME(FW_API_VER));
+
+
+static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+	return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
+}
+
+int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ int ret;
+ sdio_claim_host(priv->func);
+ ret = __iwmct_tx(priv, src, count);
+ sdio_release_host(priv->func);
+ return ret;
+}
+
+/*
+ * This worker's main task is to wait for OP_OPR_ALIVE from the TOP FW
+ * until the ALIVE_MSG_TIMOUT timeout elapses. When OP_OPR_ALIVE is
+ * received, it issues a call to bus_rescan_devices().
+ */
+static void iwmct_rescan_worker(struct work_struct *ws)
+{
+ struct iwmct_priv *priv;
+ int ret;
+
+ priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
+
+ LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
+
+ ret = bus_rescan_devices(priv->func->dev.bus);
+ if (ret < 0)
+ LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
+}
+
+static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
+{
+ switch (msg->hdr.opcode) {
+ case OP_OPR_ALIVE:
+ LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
+ schedule_work(&priv->bus_rescan_worker);
+ break;
+ default:
+ LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
+ msg->hdr.opcode);
+ break;
+ }
+}
+
+
+static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
+{
+ struct top_msg *msg;
+
+ msg = (struct top_msg *)buf;
+
+ if (msg->hdr.type != COMM_TYPE_D2H) {
+ LOG_ERROR(priv, FW_MSG,
+ "Message from TOP with invalid message type 0x%X\n",
+ msg->hdr.type);
+ return;
+ }
+
+ if (len < sizeof(msg->hdr)) {
+ LOG_ERROR(priv, FW_MSG,
+ "Message from TOP is too short for message header "
+ "received %d bytes, expected at least %zd bytes\n",
+ len, sizeof(msg->hdr));
+ return;
+ }
+
+ if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
+ LOG_ERROR(priv, FW_MSG,
+ "Message length (%d bytes) is shorter than "
+ "in header (%d bytes)\n",
+ len, le16_to_cpu(msg->hdr.length));
+ return;
+ }
+
+ switch (msg->hdr.category) {
+ case COMM_CATEGORY_OPERATIONAL:
+ op_top_message(priv, (struct top_msg *)buf);
+ break;
+
+ case COMM_CATEGORY_DEBUG:
+ case COMM_CATEGORY_TESTABILITY:
+ case COMM_CATEGORY_DIAGNOSTICS:
+ iwmct_log_top_message(priv, buf, len);
+ break;
+
+ default:
+ LOG_ERROR(priv, FW_MSG,
+ "Message from TOP with unknown category 0x%X\n",
+ msg->hdr.category);
+ break;
+ }
+}
+
+int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
+{
+ int ret;
+ u8 *buf;
+
+ LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
+
+ /* add padding to 256 for IWMC */
+ ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
+
+ LOG_HEXDUMP(FW_MSG, cmd, len);
+
+ if (len > FW_HCMD_BLOCK_SIZE) {
+ LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
+ len, FW_HCMD_BLOCK_SIZE);
+ return -1;
+ }
+
+ buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
+ if (!buf) {
+ LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
+ FW_HCMD_BLOCK_SIZE);
+ return -1;
+ }
+
+ memcpy(buf, cmd, len);
+ ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
+
+ kfree(buf);
+ return ret;
+}
+
+
+static void iwmct_irq_read_worker(struct work_struct *ws)
+{
+ struct iwmct_priv *priv;
+ struct iwmct_work_struct *read_req;
+ __le32 *buf = NULL;
+ int ret;
+ int iosize;
+ u32 barker;
+ bool is_barker;
+
+ priv = container_of(ws, struct iwmct_priv, isr_worker);
+
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
+
+ /* --------------------- Handshake with device -------------------- */
+ sdio_claim_host(priv->func);
+
+ /* all list manipulations have to be protected by
+ * sdio_claim_host/sdio_release_host */
+ if (list_empty(&priv->read_req_list)) {
+ LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
+ goto exit_release;
+ }
+
+ read_req = list_entry(priv->read_req_list.next,
+ struct iwmct_work_struct, list);
+
+ list_del(&read_req->list);
+ iosize = read_req->iosize;
+ kfree(read_req);
+
+ buf = kzalloc(iosize, GFP_KERNEL);
+ if (!buf) {
+ LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
+ goto exit_release;
+ }
+
+ LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
+ iosize, buf, priv->func->num);
+
+ /* read from device */
+ ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
+ if (ret) {
+ LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
+ goto exit_release;
+ }
+
+ LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
+
+ barker = le32_to_cpu(buf[0]);
+
+ /* Verify whether it's a barker and if not - treat as regular Rx */
+ if (barker == IWMC_BARKER_ACK ||
+ (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
+
+		/* a valid barker repeats the same value in the first 4 dwords */
+ is_barker = (buf[1] == buf[0]) &&
+ (buf[2] == buf[0]) &&
+ (buf[3] == buf[0]);
+
+ if (!is_barker) {
+ LOG_WARNING(priv, IRQ,
+ "Potentially inconsistent barker "
+ "%08X_%08X_%08X_%08X\n",
+ le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
+ le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
+ }
+ } else {
+ is_barker = false;
+ }
+
+ /* Handle Top CommHub message */
+ if (!is_barker) {
+ sdio_release_host(priv->func);
+ handle_top_message(priv, (u8 *)buf, iosize);
+ goto exit;
+ } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
+ if (atomic_read(&priv->dev_sync) == 0) {
+ LOG_ERROR(priv, IRQ,
+ "ACK barker arrived out-of-sync\n");
+ goto exit_release;
+ }
+
+ /* Continuing to FW download (after Sync is completed)*/
+ atomic_set(&priv->dev_sync, 0);
+ LOG_INFO(priv, IRQ, "ACK barker arrived "
+ "- starting FW download\n");
+ } else { /* REBOOT barker */
+ LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
+ priv->barker = barker;
+
+ if (barker & BARKER_DNLOAD_SYNC_MSK) {
+ /* Send the same barker back */
+ ret = __iwmct_tx(priv, buf, iosize);
+ if (ret) {
+ LOG_ERROR(priv, IRQ,
+ "error %d echoing barker\n", ret);
+ goto exit_release;
+ }
+ LOG_INFO(priv, IRQ, "Echoing barker to device\n");
+ atomic_set(&priv->dev_sync, 1);
+ goto exit_release;
+ }
+
+ /* Continuing to FW download (without Sync) */
+ LOG_INFO(priv, IRQ, "No sync requested "
+ "- starting FW download\n");
+ }
+
+ sdio_release_host(priv->func);
+
+ if (priv->dbg.fw_download)
+ iwmct_fw_load(priv);
+ else
+ LOG_ERROR(priv, IRQ, "FW download not allowed\n");
+
+ goto exit;
+
+exit_release:
+ sdio_release_host(priv->func);
+exit:
+ kfree(buf);
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
+}
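+
+/*
+ * Handshake summary, derived from the worker above:
+ *
+ *	device -> host : REBOOT barker (0xdeadbeXX repeated in 4 dwords)
+ *	host -> device : same barker echoed back (only if the SYNC bit is set)
+ *	device -> host : ACK barker (0xfeedbabe repeated in 4 dwords)
+ *	host           : starts the firmware download (iwmct_fw_load)
+ */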
+
+static void iwmct_irq(struct sdio_func *func)
+{
+ struct iwmct_priv *priv;
+ int val, ret;
+ int iosize;
+ int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
+ struct iwmct_work_struct *read_req;
+
+ priv = sdio_get_drvdata(func);
+
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
+
+ /* read the function's status register */
+ val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
+
+ LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
+
+ if (!val) {
+ LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
+ goto exit_clear_intr;
+ }
+
+
+ /*
+ * read 2 bytes of the transaction size
+ * IMPORTANT: sdio transaction size has to be read before clearing
+ * sdio interrupt!!!
+ */
+ val = sdio_readb(priv->func, addr++, &ret);
+ iosize = val;
+ val = sdio_readb(priv->func, addr++, &ret);
+ iosize += val << 8;
+
+ LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
+
+ if (iosize == 0) {
+ LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
+ goto exit_clear_intr;
+ }
+
+ /* allocate a work structure to pass iosize to the worker */
+ read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
+ if (!read_req) {
+ LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
+ goto exit_clear_intr;
+ }
+
+ INIT_LIST_HEAD(&read_req->list);
+ read_req->iosize = iosize;
+
+	list_add_tail(&read_req->list, &priv->read_req_list);
+
+ /* clear the function's interrupt request bit (write 1 to clear) */
+ sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
+
+ schedule_work(&priv->isr_worker);
+
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
+
+ return;
+
+exit_clear_intr:
+ /* clear the function's interrupt request bit (write 1 to clear) */
+ sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
+}
+
+
+static int blocks;
+module_param(blocks, int, 0604);
+MODULE_PARM_DESC(blocks, "max_blocks_to_send");
+
+static bool dump;
+module_param(dump, bool, 0604);
+MODULE_PARM_DESC(dump, "dump_hex_content");
+
+static bool jump = 1;
+module_param(jump, bool, 0604);
+
+static bool direct = 1;
+module_param(direct, bool, 0604);
+
+static bool checksum = 1;
+module_param(checksum, bool, 0604);
+
+static bool fw_download = 1;
+module_param(fw_download, bool, 0604);
+
+static int block_size = IWMC_SDIO_BLK_SIZE;
+module_param(block_size, int, 0404);
+
+static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
+module_param(download_trans_blks, int, 0604);
+
+static bool rubbish_barker;
+module_param(rubbish_barker, bool, 0604);
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+static int log_level[LOG_SRC_MAX];
+static unsigned int log_level_argc;
+module_param_array(log_level, int, &log_level_argc, 0604);
+MODULE_PARM_DESC(log_level, "log_level");
+
+static int log_level_fw[FW_LOG_SRC_MAX];
+static unsigned int log_level_fw_argc;
+module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
+MODULE_PARM_DESC(log_level_fw, "log_level_fw");
+#endif
+
+void iwmct_dbg_init_params(struct iwmct_priv *priv)
+{
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+ int i;
+
+ for (i = 0; i < log_level_argc; i++) {
+ dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
+ i, log_level[i]);
+ iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
+ log_level[i] & 0xFF);
+ }
+ for (i = 0; i < log_level_fw_argc; i++) {
+ dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
+ i, log_level_fw[i]);
+ iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
+ log_level_fw[i] & 0xFF);
+ }
+#endif
+
+ priv->dbg.blocks = blocks;
+ LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
+ priv->dbg.dump = (bool)dump;
+ LOG_INFO(priv, INIT, "dump=%d\n", dump);
+ priv->dbg.jump = (bool)jump;
+ LOG_INFO(priv, INIT, "jump=%d\n", jump);
+ priv->dbg.direct = (bool)direct;
+ LOG_INFO(priv, INIT, "direct=%d\n", direct);
+ priv->dbg.checksum = (bool)checksum;
+ LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
+ priv->dbg.fw_download = (bool)fw_download;
+ LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
+ priv->dbg.block_size = block_size;
+ LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
+ priv->dbg.download_trans_blks = download_trans_blks;
+ LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+static ssize_t show_iwmct_fw_version(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwmct_priv *priv = dev_get_drvdata(d);
+ return sprintf(buf, "%s\n", priv->dbg.label_fw);
+}
+static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
+ show_iwmct_log_level, store_iwmct_log_level);
+static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
+ show_iwmct_log_level_fw, store_iwmct_log_level_fw);
+#endif
+
+static struct attribute *iwmct_sysfs_entries[] = {
+ &dev_attr_cc_label_fw.attr,
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+ &dev_attr_log_level.attr,
+ &dev_attr_log_level_fw.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group iwmct_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = iwmct_sysfs_entries,
+};
+
+
+static int iwmct_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct iwmct_priv *priv;
+ int ret;
+ int val = 1;
+ int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
+
+ dev_dbg(&func->dev, "enter iwmct_probe\n");
+
+	dev_dbg(&func->dev, "IRQ polling period is %u msecs, HZ is %d\n",
+		jiffies_to_msecs(2147483647), HZ);
+
+ priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&func->dev, "kzalloc error\n");
+ return -ENOMEM;
+ }
+ priv->func = func;
+ sdio_set_drvdata(func, priv);
+
+ INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
+ INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
+
+ init_waitqueue_head(&priv->wait_q);
+
+ sdio_claim_host(func);
+ /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
+ func->enable_timeout = 10;
+
+ /* In our HW, setting the block size also wakes up the boot rom. */
+ ret = sdio_set_block_size(func, priv->dbg.block_size);
+ if (ret) {
+ LOG_ERROR(priv, INIT,
+ "sdio_set_block_size() failure: %d\n", ret);
+ goto error_sdio_enable;
+ }
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
+ goto error_sdio_enable;
+ }
+
+ /* init reset and dev_sync states */
+ atomic_set(&priv->reset, 0);
+ atomic_set(&priv->dev_sync, 0);
+
+ /* init read req queue */
+ INIT_LIST_HEAD(&priv->read_req_list);
+
+ /* process configurable parameters */
+ iwmct_dbg_init_params(priv);
+ ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
+ if (ret) {
+ LOG_ERROR(priv, INIT, "Failed to register attributes and "
+ "initialize module_params\n");
+ goto error_dev_attrs;
+ }
+
+ iwmct_dbgfs_register(priv, DRV_NAME);
+
+ if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
+ LOG_INFO(priv, INIT,
+ "Reducing transaction to 8 blocks = 2K (from %d)\n",
+ priv->dbg.download_trans_blks);
+ priv->dbg.download_trans_blks = 8;
+ }
+ priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
+ LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
+
+ ret = sdio_claim_irq(func, iwmct_irq);
+ if (ret) {
+ LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
+ goto error_claim_irq;
+ }
+
+
+ /* Enable function's interrupt */
+ sdio_writeb(priv->func, val, addr, &ret);
+ if (ret) {
+ LOG_ERROR(priv, INIT, "Failure writing to "
+ "Interrupt Enable Register (%d): %d\n", addr, ret);
+ goto error_enable_int;
+ }
+
+ sdio_release_host(func);
+
+ LOG_INFO(priv, INIT, "exit iwmct_probe\n");
+
+ return ret;
+
+error_enable_int:
+ sdio_release_irq(func);
+error_claim_irq:
+ sdio_disable_func(func);
+error_dev_attrs:
+ iwmct_dbgfs_unregister(priv->dbgfs);
+ sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
+error_sdio_enable:
+ sdio_release_host(func);
+ return ret;
+}
+
+static void iwmct_remove(struct sdio_func *func)
+{
+ struct iwmct_work_struct *read_req;
+ struct iwmct_priv *priv = sdio_get_drvdata(func);
+
+ LOG_INFO(priv, INIT, "enter\n");
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+
+ /* Make sure works are finished */
+ flush_work_sync(&priv->bus_rescan_worker);
+ flush_work_sync(&priv->isr_worker);
+
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
+ iwmct_dbgfs_unregister(priv->dbgfs);
+ sdio_release_host(func);
+
+ /* free read requests */
+ while (!list_empty(&priv->read_req_list)) {
+ read_req = list_entry(priv->read_req_list.next,
+ struct iwmct_work_struct, list);
+
+ list_del(&read_req->list);
+ kfree(read_req);
+ }
+
+ kfree(priv);
+}
+
+
+static const struct sdio_device_id iwmct_ids[] = {
+ /* Intel Wireless MultiCom 3200 Top Driver */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
+ { }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(sdio, iwmct_ids);
+
+static struct sdio_driver iwmct_driver = {
+ .probe = iwmct_probe,
+ .remove = iwmct_remove,
+ .name = DRV_NAME,
+ .id_table = iwmct_ids,
+};
+
+static int __init iwmct_init(void)
+{
+ int rc;
+
+ /* Default log filter settings */
+ iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
+ iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
+ iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
+
+ rc = sdio_register_driver(&iwmct_driver);
+
+ return rc;
+}
+
+static void __exit iwmct_exit(void)
+{
+ sdio_unregister_driver(&iwmct_driver);
+}
+
+module_init(iwmct_init);
+module_exit(iwmct_exit);
+
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
new file mode 100644
index 00000000..3aa9a969
--- /dev/null
+++ b/drivers/misc/kgdbts.c
@@ -0,0 +1,1187 @@
+/*
+ * kgdbts is a test suite for kgdb for the sole purpose of validating
+ * that key pieces of the kgdb internals are working properly such as
+ * HW/SW breakpoints, single stepping, and NMI.
+ *
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/* Information about the kgdb test suite.
+ * -------------------------------------
+ *
+ * The kgdb test suite is designed as a KGDB I/O module which
+ * simulates the communications that a debugger would have with kgdb.
+ * The tests are broken up into a line-by-line exchange, referenced
+ * here as a "get", which is kgdb requesting input, and a "put", which
+ * is kgdb sending a response.
+ *
+ * The kgdb suite can be invoked from the kernel command line
+ * arguments system or executed dynamically at run time. The test
+ * suite uses the variable "kgdbts" to obtain the information about
+ * which tests to run and to configure the verbosity level. The
+ * following are the various characters you can use with the kgdbts=
+ * line:
+ *
+ * When using the "kgdbts=" you only choose one of the following core
+ * test types:
+ * A = Run all the core tests silently
+ * V1 = Run all the core tests with minimal output
+ * V2 = Run all the core tests in debug mode
+ *
+ * You can also specify optional tests:
+ * N## = Go to sleep with interrupts off for ## seconds
+ * to test the HW NMI watchdog
+ * F## = Break at do_fork for ## iterations
+ * S## = Break at sys_open for ## iterations
+ * I## = Run the single step test ## iterations
+ *
+ * NOTE: the do_fork and sys_open tests are mutually exclusive.
+ *
+ * To invoke the kgdb test suite from boot you use a kernel start
+ * argument as follows:
+ * kgdbts=V1 kgdbwait
+ * Or if you wanted to perform the NMI test for 6 seconds and do_fork
+ * test for 100 forks, you could use:
+ * kgdbts=V1N6F100 kgdbwait
+ *
+ * The test suite can also be invoked at run time with:
+ * echo kgdbts=V1N6F100 > /sys/module/kgdbts/parameters/kgdbts
+ * Or as another example:
+ * echo kgdbts=V2 > /sys/module/kgdbts/parameters/kgdbts
+ *
+ * When developing a new kgdb arch specific implementation or
+ * using these tests for the purpose of regression testing,
+ * several invocations are required.
+ *
+ * 1) Boot with the test suite enabled by using the kernel arguments
+ * "kgdbts=V1F100 kgdbwait"
+ * ## If kgdb arch specific implementation has NMI use
+ *       "kgdbts=V1N6F100 kgdbwait"
+ *
+ * 2) After the system boot run the basic test.
+ * echo kgdbts=V1 > /sys/module/kgdbts/parameters/kgdbts
+ *
+ * 3) Run the concurrency tests. It is best to use n+1
+ * while loops where n is the number of cpus you have
+ * in your system. The example below uses only two
+ * loops.
+ *
+ * ## This tests break points on sys_open
+ * while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
+ * while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
+ * echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts
+ * fg # and hit control-c
+ * fg # and hit control-c
+ * ## This tests break points on do_fork
+ * while [ 1 ] ; do date > /dev/null ; done &
+ * while [ 1 ] ; do date > /dev/null ; done &
+ * echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts
+ * fg # and hit control-c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/kgdb.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+
+#define v1printk(a...) do { \
+ if (verbose) \
+ printk(KERN_INFO a); \
+ } while (0)
+#define v2printk(a...) do { \
+ if (verbose > 1) \
+ printk(KERN_INFO a); \
+ touch_nmi_watchdog(); \
+ } while (0)
+#define eprintk(a...) do { \
+ printk(KERN_ERR a); \
+ WARN_ON(1); \
+ } while (0)
+#define MAX_CONFIG_LEN 40
+
+static struct kgdb_io kgdbts_io_ops;
+static char get_buf[BUFMAX];
+static int get_buf_cnt;
+static char put_buf[BUFMAX];
+static int put_buf_cnt;
+static char scratch_buf[BUFMAX];
+static int verbose;
+static int repeat_test;
+static int test_complete;
+static int send_ack;
+static int final_ack;
+static int force_hwbrks;
+static int hwbreaks_ok;
+static int hw_break_val;
+static int hw_break_val2;
+static int cont_instead_of_sstep;
+static unsigned long cont_thread_id;
+static unsigned long sstep_thread_id;
+#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
+static int arch_needs_sstep_emulation = 1;
+#else
+static int arch_needs_sstep_emulation;
+#endif
+static unsigned long cont_addr;
+static unsigned long sstep_addr;
+static int restart_from_top_after_write;
+static int sstep_state;
+
+/* Storage for the registers, in GDB format. */
+static unsigned long kgdbts_gdb_regs[(NUMREGBYTES +
+ sizeof(unsigned long) - 1) /
+ sizeof(unsigned long)];
+static struct pt_regs kgdbts_regs;
+
+/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+static int configured = -1;
+
+#ifdef CONFIG_KGDB_TESTS_BOOT_STRING
+static char config[MAX_CONFIG_LEN] = CONFIG_KGDB_TESTS_BOOT_STRING;
+#else
+static char config[MAX_CONFIG_LEN];
+#endif
+static struct kparam_string kps = {
+ .string = config,
+ .maxlen = MAX_CONFIG_LEN,
+};
+
+static void fill_get_buf(char *buf);
+
+struct test_struct {
+ char *get;
+ char *put;
+ void (*get_handler)(char *);
+ int (*put_handler)(char *, char *);
+};
+
+struct test_state {
+ char *name;
+ struct test_struct *tst;
+ int idx;
+ int (*run_test) (int, int);
+ int (*validate_put) (char *);
+};
+
+static struct test_state ts;
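+
+/*
+ * Illustrative shape of a test table entry (hypothetical strings; the
+ * real tables appear later in this file): the "get" string is fed to
+ * kgdb as if a debugger had sent it, and kgdb's reply is matched
+ * against the "put" string:
+ *
+ *	{ "?", "S0" },	query the halt reason, expect a stop-signal reply
+ */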
+
+static int kgdbts_unreg_thread(void *ptr)
+{
+	/* Wait until the tests are complete and then unregister the I/O
+	 * driver.
+	 */
+ while (!final_ack)
+ msleep_interruptible(1500);
+ /* Pause for any other threads to exit after final ack. */
+ msleep_interruptible(1000);
+ if (configured)
+ kgdb_unregister_io_module(&kgdbts_io_ops);
+ configured = 0;
+
+ return 0;
+}
+
+/* This is noinline so that there is a single, known location at which
+ * to place a breakpoint.
+ */
+static noinline void kgdbts_break_test(void)
+{
+ v2printk("kgdbts: breakpoint complete\n");
+}
+
+/* Lookup symbol info in the kernel */
+static unsigned long lookup_addr(char *arg)
+{
+ unsigned long addr = 0;
+
+ if (!strcmp(arg, "kgdbts_break_test"))
+ addr = (unsigned long)kgdbts_break_test;
+ else if (!strcmp(arg, "sys_open"))
+ addr = (unsigned long)do_sys_open;
+ else if (!strcmp(arg, "do_fork"))
+ addr = (unsigned long)do_fork;
+ else if (!strcmp(arg, "hw_break_val"))
+ addr = (unsigned long)&hw_break_val;
+ return addr;
+}
+
+static void break_helper(char *bp_type, char *arg, unsigned long vaddr)
+{
+ unsigned long addr;
+
+ if (arg)
+ addr = lookup_addr(arg);
+ else
+ addr = vaddr;
+
+ sprintf(scratch_buf, "%s,%lx,%i", bp_type, addr,
+ BREAK_INSTR_SIZE);
+ fill_get_buf(scratch_buf);
+}
+
+static void sw_break(char *arg)
+{
+ break_helper(force_hwbrks ? "Z1" : "Z0", arg, 0);
+}
+
+static void sw_rem_break(char *arg)
+{
+ break_helper(force_hwbrks ? "z1" : "z0", arg, 0);
+}
+
+static void hw_break(char *arg)
+{
+ break_helper("Z1", arg, 0);
+}
+
+static void hw_rem_break(char *arg)
+{
+ break_helper("z1", arg, 0);
+}
+
+static void hw_write_break(char *arg)
+{
+ break_helper("Z2", arg, 0);
+}
+
+static void hw_rem_write_break(char *arg)
+{
+ break_helper("z2", arg, 0);
+}
+
+static void hw_access_break(char *arg)
+{
+ break_helper("Z4", arg, 0);
+}
+
+static void hw_rem_access_break(char *arg)
+{
+ break_helper("z4", arg, 0);
+}
+
+static void hw_break_val_access(void)
+{
+ hw_break_val2 = hw_break_val;
+}
+
+static void hw_break_val_write(void)
+{
+ hw_break_val++;
+}
+
+static int get_thread_id_continue(char *put_str, char *arg)
+{
+ char *ptr = &put_str[11];
+
+ if (put_str[1] != 'T' || put_str[2] != '0')
+ return 1;
+ kgdb_hex2long(&ptr, &cont_thread_id);
+ return 0;
+}
+
+static int check_and_rewind_pc(char *put_str, char *arg)
+{
+ unsigned long addr = lookup_addr(arg);
+ unsigned long ip;
+ int offset = 0;
+
+ kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
+ NUMREGBYTES);
+ gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+ ip = instruction_pointer(&kgdbts_regs);
+ v2printk("Stopped at IP: %lx\n", ip);
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+ /* On some arches, a breakpoint stop requires it to be decremented */
+ if (addr + BREAK_INSTR_SIZE == ip)
+ offset = -BREAK_INSTR_SIZE;
+#endif
+
+ if (arch_needs_sstep_emulation && sstep_addr &&
+ ip + offset == sstep_addr &&
+ ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
+ /* This is special case for emulated single step */
+ v2printk("Emul: rewind hit single step bp\n");
+ restart_from_top_after_write = 1;
+ } else if (strcmp(arg, "silent") && ip + offset != addr) {
+ eprintk("kgdbts: BP mismatch %lx expected %lx\n",
+ ip + offset, addr);
+ return 1;
+ }
+ /* Readjust the instruction pointer if needed */
+ ip += offset;
+ cont_addr = ip;
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+ instruction_pointer_set(&kgdbts_regs, ip);
+#endif
+ return 0;
+}
+
+static int check_single_step(char *put_str, char *arg)
+{
+ unsigned long addr = lookup_addr(arg);
+ static int matched_id;
+
+ /*
+ * From an arch-independent point of view the instruction pointer
+ * should now be on a different instruction.
+ */
+ kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
+ NUMREGBYTES);
+ gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+ v2printk("Singlestep stopped at IP: %lx\n",
+ instruction_pointer(&kgdbts_regs));
+
+ if (sstep_thread_id != cont_thread_id) {
+ /*
+ * We stopped in a different thread than the one that was single
+ * stepped; continue until the original thread is scheduled
+ * again, emulating gdb's behavior.
+ */
+ v2printk("ThrID does not match: %lx\n", cont_thread_id);
+ if (arch_needs_sstep_emulation) {
+ if (matched_id &&
+ instruction_pointer(&kgdbts_regs) != addr)
+ goto continue_test;
+ matched_id++;
+ ts.idx -= 2;
+ sstep_state = 0;
+ return 0;
+ }
+ cont_instead_of_sstep = 1;
+ ts.idx -= 4;
+ return 0;
+ }
+continue_test:
+ matched_id = 0;
+ if (instruction_pointer(&kgdbts_regs) == addr) {
+ eprintk("kgdbts: SingleStep failed at %lx\n",
+ instruction_pointer(&kgdbts_regs));
+ return 1;
+ }
+
+ return 0;
+}
+
+static void write_regs(char *arg)
+{
+ memset(scratch_buf, 0, sizeof(scratch_buf));
+ scratch_buf[0] = 'G';
+ pt_regs_to_gdb_regs(kgdbts_gdb_regs, &kgdbts_regs);
+ kgdb_mem2hex((char *)kgdbts_gdb_regs, &scratch_buf[1], NUMREGBYTES);
+ fill_get_buf(scratch_buf);
+}
+
+static void skip_back_repeat_test(char *arg)
+{
+ int go_back = simple_strtol(arg, NULL, 10);
+
+ repeat_test--;
+ if (repeat_test <= 0)
+ ts.idx++;
+ else
+ ts.idx -= go_back;
+ fill_get_buf(ts.tst[ts.idx].get);
+}
+
+static int got_break(char *put_str, char *arg)
+{
+ test_complete = 1;
+ if (!strncmp(put_str+1, arg, 2)) {
+ if (!strncmp(arg, "T0", 2))
+ test_complete = 2;
+ return 0;
+ }
+ return 1;
+}
+
+static void get_cont_catch(char *arg)
+{
+ /* Always send detach because the test is completed at this point */
+ fill_get_buf("D");
+}
+
+static int put_cont_catch(char *put_str, char *arg)
+{
+ /* This is at the end of the test and we catch any and all input */
+ v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id);
+ ts.idx--;
+ return 0;
+}
+
+static int emul_reset(char *put_str, char *arg)
+{
+ if (strncmp(put_str, "$OK", 3))
+ return 1;
+ if (restart_from_top_after_write) {
+ restart_from_top_after_write = 0;
+ ts.idx = -1;
+ }
+ return 0;
+}
+
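+/*
+ * Emulated single stepping walks a four step state machine driven by
+ * sstep_state:
+ *   0: "g"  - read the registers to find the current PC
+ *   1: "Z0" - plant a breakpoint at the expected next instruction
+ *   2: "c"  - continue until the planted breakpoint is hit
+ *   3: "z0" - remove the breakpoint again
+ * emul_sstep_put() validates each reply and rewinds ts.idx so that the
+ * same test line repeats until the sequence completes.
+ */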
+static void emul_sstep_get(char *arg)
+{
+ if (!arch_needs_sstep_emulation) {
+ if (cont_instead_of_sstep) {
+ cont_instead_of_sstep = 0;
+ fill_get_buf("c");
+ } else {
+ fill_get_buf(arg);
+ }
+ return;
+ }
+ switch (sstep_state) {
+ case 0:
+ v2printk("Emulate single step\n");
+ /* Start by looking at the current PC */
+ fill_get_buf("g");
+ break;
+ case 1:
+ /* set breakpoint */
+ break_helper("Z0", NULL, sstep_addr);
+ break;
+ case 2:
+ /* Continue */
+ fill_get_buf("c");
+ break;
+ case 3:
+ /* Clear breakpoint */
+ break_helper("z0", NULL, sstep_addr);
+ break;
+ default:
+ eprintk("kgdbts: ERROR failed sstep get emulation\n");
+ }
+ sstep_state++;
+}
+
+static int emul_sstep_put(char *put_str, char *arg)
+{
+ if (!arch_needs_sstep_emulation) {
+ char *ptr = &put_str[11];
+ if (put_str[1] != 'T' || put_str[2] != '0')
+ return 1;
+ kgdb_hex2long(&ptr, &sstep_thread_id);
+ return 0;
+ }
+ switch (sstep_state) {
+ case 1:
+ /* validate the "g" packet to get the IP */
+ kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
+ NUMREGBYTES);
+ gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+ v2printk("Stopped at IP: %lx\n",
+ instruction_pointer(&kgdbts_regs));
+ /* Want to stop at IP + break instruction size by default */
+ sstep_addr = cont_addr + BREAK_INSTR_SIZE;
+ break;
+ case 2:
+ if (strncmp(put_str, "$OK", 3)) {
+ eprintk("kgdbts: failed sstep break set\n");
+ return 1;
+ }
+ break;
+ case 3:
+ if (strncmp(put_str, "$T0", 3)) {
+ eprintk("kgdbts: failed continue sstep\n");
+ return 1;
+ } else {
+ char *ptr = &put_str[11];
+ kgdb_hex2long(&ptr, &sstep_thread_id);
+ }
+ break;
+ case 4:
+ if (strncmp(put_str, "$OK", 3)) {
+ eprintk("kgdbts: failed sstep break unset\n");
+ return 1;
+ }
+ /* Single step is complete so continue on! */
+ sstep_state = 0;
+ return 0;
+ default:
+ eprintk("kgdbts: ERROR failed sstep put emulation\n");
+ }
+
+ /* Continue on the same test line until emulation is complete */
+ ts.idx--;
+ return 0;
+}
+
+static int final_ack_set(char *put_str, char *arg)
+{
+ if (strncmp(put_str+1, arg, 2))
+ return 1;
+ final_ack = 1;
+ return 0;
+}
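+
+/*
+ * Each test below is a table of { get, put, get_handler, put_handler }
+ * entries: "get" is the packet the simulated debugger feeds to kgdb (or
+ * the argument passed to get_handler) and "put" is the reply expected
+ * back from kgdb (or the argument passed to put_handler). A '*' in a
+ * "put" string matches the remainder of the reply, and { "", "" }
+ * terminates a table.
+ */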
+/*
+ * Test to plant a breakpoint and detach, which should clear out the
+ * breakpoint and restore the original instruction.
+ */
+static struct test_struct plant_and_detach_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+ { "D", "OK" }, /* Detach */
+ { "", "" },
+};
+
+/*
+ * Simple test to write in a software breakpoint, check for the
+ * correct stop location and detach.
+ */
+static struct test_struct sw_breakpoint_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+ { "c", "T0*", }, /* Continue */
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs },
+ { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
+ { "D", "OK" }, /* Detach */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
+ { "", "" },
+};
+
+/*
+ * Test a known bad memory read location to test the fault handler and
+ * read 1 to 8 bytes at the bad address
+ */
+static struct test_struct bad_read_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "m0,1", "E*" }, /* read 1 byte at address 0 */
+ { "m0,2", "E*" }, /* read 2 bytes at address 0 */
+ { "m0,3", "E*" }, /* read 3 bytes at address 0 */
+ { "m0,4", "E*" }, /* read 4 bytes at address 0 */
+ { "m0,5", "E*" }, /* read 5 bytes at address 0 */
+ { "m0,6", "E*" }, /* read 6 bytes at address 0 */
+ { "m0,7", "E*" }, /* read 7 bytes at address 0 */
+ { "m0,8", "E*" }, /* read 8 bytes at address 0 */
+ { "D", "OK" }, /* Detach which removes all breakpoints and continues */
+ { "", "" },
+};
+
+/*
+ * Test for hitting a breakpoint, remove it, single step, plant it
+ * again and detach.
+ */
+static struct test_struct singlestep_break_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+ { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs }, /* Write registers */
+ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+ { "g", "kgdbts_break_test", NULL, check_single_step },
+ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+ { "c", "T0*", }, /* Continue */
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs }, /* Write registers */
+ { "D", "OK" }, /* Remove all breakpoints and continues */
+ { "", "" },
+};
+
+/*
+ * Test for hitting a breakpoint at do_fork for however many
+ * iterations are requested via the variable repeat_test.
+ */
+static struct test_struct do_fork_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+ { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
+ { "write", "OK", write_regs, emul_reset }, /* Write registers */
+ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+ { "g", "do_fork", NULL, check_single_step },
+ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
+ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
+ { "", "", get_cont_catch, put_cont_catch },
+};
+
+/* Test for hitting a breakpoint at sys_open for however many
+ * iterations are requested via the variable repeat_test.
+ */
+static struct test_struct sys_open_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+ { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+ { "write", "OK", write_regs, emul_reset }, /* Write registers */
+ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+ { "g", "sys_open", NULL, check_single_step },
+ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
+ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
+ { "", "", get_cont_catch, put_cont_catch },
+};
+
+/*
+ * Test for hitting a simple hw breakpoint
+ */
+static struct test_struct hw_breakpoint_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */
+ { "c", "T0*", }, /* Continue */
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs },
+ { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */
+ { "D", "OK" }, /* Detach */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
+ { "", "" },
+};
+
+/*
+ * Test for hitting a hw write breakpoint
+ */
+static struct test_struct hw_write_break_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */
+ { "c", "T0*", NULL, got_break }, /* Continue */
+ { "g", "silent", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs },
+ { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */
+ { "D", "OK" }, /* Detach */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
+ { "", "" },
+};
+
+/*
+ * Test for hitting a hw access breakpoint
+ */
+static struct test_struct hw_access_break_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */
+ { "c", "T0*", NULL, got_break }, /* Continue */
+ { "g", "silent", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs },
+ { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */
+ { "D", "OK" }, /* Detach */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
+ { "", "" },
+};
+
+/*
+ * Test that an NMI can enter the debugger while the CPU sleeps
+ * with interrupts disabled
+ */
+static struct test_struct nmi_sleep_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "c", "T0*", NULL, got_break }, /* Continue */
+ { "D", "OK" }, /* Detach */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
+ { "", "" },
+};
+
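+/*
+ * Frame a GDB remote protocol packet as "$" <payload> "#" followed by two
+ * hex digits holding the modulo-256 sum of the payload bytes; the payload
+ * "g", for example, is framed as "$g#67".
+ */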
+static void fill_get_buf(char *buf)
+{
+ unsigned char checksum = 0;
+ int count = 0;
+ char ch;
+
+ strcpy(get_buf, "$");
+ strcat(get_buf, buf);
+ while ((ch = buf[count])) {
+ checksum += ch;
+ count++;
+ }
+ strcat(get_buf, "#");
+ get_buf[count + 2] = hex_asc_hi(checksum);
+ get_buf[count + 3] = hex_asc_lo(checksum);
+ get_buf[count + 4] = '\0';
+ v2printk("get%i: %s\n", ts.idx, get_buf);
+}
+
+static int validate_simple_test(char *put_str)
+{
+ char *chk_str;
+
+ if (ts.tst[ts.idx].put_handler)
+ return ts.tst[ts.idx].put_handler(put_str,
+ ts.tst[ts.idx].put);
+
+ chk_str = ts.tst[ts.idx].put;
+ if (*put_str == '$')
+ put_str++;
+
+ while (*chk_str != '\0' && *put_str != '\0') {
+ /* A '*' in the check string matches the rest of the received
+ * string; also stop once the received packet is complete.
+ */
+ if (*put_str == '#' || *chk_str == '*')
+ return 0;
+ if (*put_str != *chk_str)
+ return 1;
+
+ chk_str++;
+ put_str++;
+ }
+ if (*chk_str == '\0' && (*put_str == '\0' || *put_str == '#'))
+ return 0;
+
+ return 1;
+}
+
+static int run_simple_test(int is_get_char, int chr)
+{
+ int ret = 0;
+ if (is_get_char) {
+ /* Send an ACK on the get if a prior put completed and set the
+ * send ack variable
+ */
+ if (send_ack) {
+ send_ack = 0;
+ return '+';
+ }
+ /* On the first get char, fill the transmit buffer and then
+ * take from the get_string.
+ */
+ if (get_buf_cnt == 0) {
+ if (ts.tst[ts.idx].get_handler)
+ ts.tst[ts.idx].get_handler(ts.tst[ts.idx].get);
+ else
+ fill_get_buf(ts.tst[ts.idx].get);
+ }
+
+ if (get_buf[get_buf_cnt] == '\0') {
+ eprintk("kgdbts: ERROR GET: EOB on '%s' at %i\n",
+ ts.name, ts.idx);
+ get_buf_cnt = 0;
+ fill_get_buf("D");
+ }
+ ret = get_buf[get_buf_cnt];
+ get_buf_cnt++;
+ return ret;
+ }
+
+ /* This callback is a put char which is when kgdb sends data to
+ * this I/O module.
+ */
+ if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' &&
+ !ts.tst[ts.idx].get_handler) {
+ eprintk("kgdbts: ERROR: beyond end of test on"
+ " '%s' line %i\n", ts.name, ts.idx);
+ return 0;
+ }
+
+ if (put_buf_cnt >= BUFMAX) {
+ eprintk("kgdbts: ERROR: put buffer overflow on"
+ " '%s' line %i\n", ts.name, ts.idx);
+ put_buf_cnt = 0;
+ return 0;
+ }
+ /* Ignore everything until the first valid packet start '$' */
+ if (put_buf_cnt == 0 && chr != '$')
+ return 0;
+
+ put_buf[put_buf_cnt] = chr;
+ put_buf_cnt++;
+
+ /* End of packet == #XX so look for the '#' */
+ if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
+ if (put_buf_cnt >= BUFMAX) {
+ eprintk("kgdbts: ERROR: put buffer overflow on"
+ " '%s' line %i\n", ts.name, ts.idx);
+ put_buf_cnt = 0;
+ return 0;
+ }
+ put_buf[put_buf_cnt] = '\0';
+ v2printk("put%i: %s\n", ts.idx, put_buf);
+ /* Trigger check here */
+ if (ts.validate_put && ts.validate_put(put_buf)) {
+ eprintk("kgdbts: ERROR PUT: end of test "
+ "buffer on '%s' line %i expected %s got %s\n",
+ ts.name, ts.idx, ts.tst[ts.idx].put, put_buf);
+ }
+ ts.idx++;
+ put_buf_cnt = 0;
+ get_buf_cnt = 0;
+ send_ack = 1;
+ }
+ return 0;
+}
+
+static void init_simple_test(void)
+{
+ memset(&ts, 0, sizeof(ts));
+ ts.run_test = run_simple_test;
+ ts.validate_put = validate_simple_test;
+}
+
+static void run_plant_and_detach_test(int is_early)
+{
+ char before[BREAK_INSTR_SIZE];
+ char after[BREAK_INSTR_SIZE];
+
+ probe_kernel_read(before, (char *)kgdbts_break_test,
+ BREAK_INSTR_SIZE);
+ init_simple_test();
+ ts.tst = plant_and_detach_test;
+ ts.name = "plant_and_detach_test";
+ /* Activate test with initial breakpoint */
+ if (!is_early)
+ kgdb_breakpoint();
+ probe_kernel_read(after, (char *)kgdbts_break_test,
+ BREAK_INSTR_SIZE);
+ if (memcmp(before, after, BREAK_INSTR_SIZE)) {
+ printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
+ panic("kgdb memory corruption");
+ }
+
+ /* complete the detach test */
+ if (!is_early)
+ kgdbts_break_test();
+}
+
+static void run_breakpoint_test(int is_hw_breakpoint)
+{
+ test_complete = 0;
+ init_simple_test();
+ if (is_hw_breakpoint) {
+ ts.tst = hw_breakpoint_test;
+ ts.name = "hw_breakpoint_test";
+ } else {
+ ts.tst = sw_breakpoint_test;
+ ts.name = "sw_breakpoint_test";
+ }
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
+ /* run code with the break point in it */
+ kgdbts_break_test();
+ kgdb_breakpoint();
+
+ if (test_complete)
+ return;
+
+ eprintk("kgdbts: ERROR %s test failed\n", ts.name);
+ if (is_hw_breakpoint)
+ hwbreaks_ok = 0;
+}
+
+static void run_hw_break_test(int is_write_test)
+{
+ test_complete = 0;
+ init_simple_test();
+ if (is_write_test) {
+ ts.tst = hw_write_break_test;
+ ts.name = "hw_write_break_test";
+ } else {
+ ts.tst = hw_access_break_test;
+ ts.name = "hw_access_break_test";
+ }
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
+ hw_break_val_access();
+ if (is_write_test) {
+ if (test_complete == 2) {
+ eprintk("kgdbts: ERROR %s broke on access\n",
+ ts.name);
+ hwbreaks_ok = 0;
+ }
+ hw_break_val_write();
+ }
+ kgdb_breakpoint();
+
+ if (test_complete == 1)
+ return;
+
+ eprintk("kgdbts: ERROR %s test failed\n", ts.name);
+ hwbreaks_ok = 0;
+}
+
+static void run_nmi_sleep_test(int nmi_sleep)
+{
+ unsigned long flags;
+
+ init_simple_test();
+ ts.tst = nmi_sleep_test;
+ ts.name = "nmi_sleep_test";
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
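+ /*
+ * Busy-wait with interrupts disabled; only an NMI can enter the
+ * debugger here, and got_break() sets test_complete to 2 once the
+ * "T0" stop packet arrives.
+ */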
+ local_irq_save(flags);
+ mdelay(nmi_sleep*1000);
+ touch_nmi_watchdog();
+ local_irq_restore(flags);
+ if (test_complete != 2)
+ eprintk("kgdbts: ERROR nmi_test did not hit nmi\n");
+ kgdb_breakpoint();
+ if (test_complete == 1)
+ return;
+
+ eprintk("kgdbts: ERROR %s test failed\n", ts.name);
+}
+
+static void run_bad_read_test(void)
+{
+ init_simple_test();
+ ts.tst = bad_read_test;
+ ts.name = "bad_read_test";
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
+}
+
+static void run_do_fork_test(void)
+{
+ init_simple_test();
+ ts.tst = do_fork_test;
+ ts.name = "do_fork_test";
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
+}
+
+static void run_sys_open_test(void)
+{
+ init_simple_test();
+ ts.tst = sys_open_test;
+ ts.name = "sys_open_test";
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
+}
+
+static void run_singlestep_break_test(void)
+{
+ init_simple_test();
+ ts.tst = singlestep_break_test;
+ ts.name = "singlestep_breakpoint_test";
+ /* Activate test with initial breakpoint */
+ kgdb_breakpoint();
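+ /* The test table plants the sw breakpoint twice, so trip it twice */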
+ kgdbts_break_test();
+ kgdbts_break_test();
+}
+
+static void kgdbts_run_tests(void)
+{
+ char *ptr;
+ int fork_test = 0;
+ int do_sys_open_test = 0;
+ int sstep_test = 1000;
+ int nmi_sleep = 0;
+ int i;
+
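+ /*
+ * The config string packs option letters and numbers; for example
+ * "V1F100" selects verbosity 1 and 100 do_fork iterations, while
+ * "V2S20N5" selects verbosity 2, 20 sys_open iterations and a 5
+ * second NMI sleep test.
+ */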
+ ptr = strchr(config, 'F');
+ if (ptr)
+ fork_test = simple_strtol(ptr + 1, NULL, 10);
+ ptr = strchr(config, 'S');
+ if (ptr)
+ do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
+ ptr = strchr(config, 'N');
+ if (ptr)
+ nmi_sleep = simple_strtol(ptr+1, NULL, 10);
+ ptr = strchr(config, 'I');
+ if (ptr)
+ sstep_test = simple_strtol(ptr+1, NULL, 10);
+
+ /* All HW break point tests */
+ if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
+ hwbreaks_ok = 1;
+ v1printk("kgdbts:RUN hw breakpoint test\n");
+ run_breakpoint_test(1);
+ v1printk("kgdbts:RUN hw write breakpoint test\n");
+ run_hw_break_test(1);
+ v1printk("kgdbts:RUN hw access breakpoint test\n");
+ run_hw_break_test(0);
+ }
+
+ /* required internal KGDB tests */
+ v1printk("kgdbts:RUN plant and detach test\n");
+ run_plant_and_detach_test(0);
+ v1printk("kgdbts:RUN sw breakpoint test\n");
+ run_breakpoint_test(0);
+ v1printk("kgdbts:RUN bad memory access test\n");
+ run_bad_read_test();
+ v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test);
+ for (i = 0; i < sstep_test; i++) {
+ run_singlestep_break_test();
+ if (i % 100 == 0)
+ v1printk("kgdbts:RUN singlestep [%i/%i]\n",
+ i, sstep_test);
+ }
+
+ /* ===Optional tests=== */
+
+ if (nmi_sleep) {
+ v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
+ run_nmi_sleep_test(nmi_sleep);
+ }
+
+ /* If the do_fork test is run it will be the last test that is
+ * executed because a kernel thread will be spawned at the very
+ * end to unregister the debug hooks.
+ */
+ if (fork_test) {
+ repeat_test = fork_test;
+ printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n",
+ repeat_test);
+ kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
+ run_do_fork_test();
+ return;
+ }
+
+ /* If the sys_open test is run it will be the last test that is
+ * executed because a kernel thread will be spawned at the very
+ * end to unregister the debug hooks.
+ */
+ if (do_sys_open_test) {
+ repeat_test = do_sys_open_test;
+ printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n",
+ repeat_test);
+ kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
+ run_sys_open_test();
+ return;
+ }
+ /* Shutdown and unregister */
+ kgdb_unregister_io_module(&kgdbts_io_ops);
+ configured = 0;
+}
+
+static int kgdbts_option_setup(char *opt)
+{
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
+ printk(KERN_ERR "kgdbts: config string too long\n");
+ return -ENOSPC;
+ }
+ strcpy(config, opt);
+
+ verbose = 0;
+ if (strstr(config, "V1"))
+ verbose = 1;
+ if (strstr(config, "V2"))
+ verbose = 2;
+
+ return 0;
+}
+
+__setup("kgdbts=", kgdbts_option_setup);
+
+static int configure_kgdbts(void)
+{
+ int err = 0;
+
+ if (!strlen(config) || isspace(config[0]))
+ goto noconfig;
+ err = kgdbts_option_setup(config);
+ if (err)
+ goto noconfig;
+
+ final_ack = 0;
+ run_plant_and_detach_test(1);
+
+ err = kgdb_register_io_module(&kgdbts_io_ops);
+ if (err) {
+ configured = 0;
+ return err;
+ }
+ configured = 1;
+ kgdbts_run_tests();
+
+ return err;
+
+noconfig:
+ config[0] = 0;
+ configured = 0;
+
+ return err;
+}
+
+static int __init init_kgdbts(void)
+{
+ /* Already configured? */
+ if (configured == 1)
+ return 0;
+
+ return configure_kgdbts();
+}
+
+static int kgdbts_get_char(void)
+{
+ int val = 0;
+
+ if (ts.run_test)
+ val = ts.run_test(1, 0);
+
+ return val;
+}
+
+static void kgdbts_put_char(u8 chr)
+{
+ if (ts.run_test)
+ ts.run_test(0, chr);
+}
+
+static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
+{
+ int len = strlen(kmessage);
+
+ if (len >= MAX_CONFIG_LEN) {
+ printk(KERN_ERR "kgdbts: config string too long\n");
+ return -ENOSPC;
+ }
+
+ /* Only copy in the string if the init function has not run yet */
+ if (configured < 0) {
+ strcpy(config, kmessage);
+ return 0;
+ }
+
+ if (configured == 1) {
+ printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n");
+ return -EBUSY;
+ }
+
+ strcpy(config, kmessage);
+ /* Strip the trailing '\n' that echo appends */
+ if (config[len - 1] == '\n')
+ config[len - 1] = '\0';
+
+ /* Go and configure with the new params. */
+ return configure_kgdbts();
+}
+
+static void kgdbts_pre_exp_handler(void)
+{
+ /* Increment the module count when the debugger is active */
+ if (!kgdb_connected)
+ try_module_get(THIS_MODULE);
+}
+
+static void kgdbts_post_exp_handler(void)
+{
+ /* decrement the module count when the debugger detaches */
+ if (!kgdb_connected)
+ module_put(THIS_MODULE);
+}
+
+static struct kgdb_io kgdbts_io_ops = {
+ .name = "kgdbts",
+ .read_char = kgdbts_get_char,
+ .write_char = kgdbts_put_char,
+ .pre_exception = kgdbts_pre_exp_handler,
+ .post_exception = kgdbts_post_exp_handler,
+};
+
+module_init(init_kgdbts);
+module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644);
+MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#][I#]");
+MODULE_DESCRIPTION("KGDB Test Suite");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wind River Systems, Inc.");
+
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
new file mode 100644
index 00000000..8f474e6f
--- /dev/null
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -0,0 +1,37 @@
+#
+# STMicroelectronics LIS3LV02D and similar accelerometers
+#
+
+config SENSORS_LIS3_SPI
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
+ depends on !ACPI && SPI_MASTER && INPUT
+ select SENSORS_LIS3LV02D
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via SPI. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the SPI transport
+ is called lis3lv02d_spi.
+
+config SENSORS_LIS3_I2C
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ depends on I2C && INPUT
+ select SENSORS_LIS3LV02D
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via I2C. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the device to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the I2C transport
+ is called lis3lv02d_i2c.
diff --git a/drivers/misc/lis3lv02d/Makefile b/drivers/misc/lis3lv02d/Makefile
new file mode 100644
index 00000000..4bf58b16
--- /dev/null
+++ b/drivers/misc/lis3lv02d/Makefile
@@ -0,0 +1,7 @@
+#
+# STMicroelectronics LIS3LV02D and similar accelerometers
+#
+
+obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o
+obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d_spi.o
+obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d_i2c.o
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
new file mode 100644
index 00000000..a981e2a4
--- /dev/null
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -0,0 +1,1054 @@
+/*
+ * lis3lv02d.c - ST LIS3LV02DL accelerometer driver
+ *
+ * Copyright (C) 2007-2008 Yan Burman
+ * Copyright (C) 2008 Eric Piel
+ * Copyright (C) 2008-2009 Pavel Machek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/input-polldev.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/atomic.h>
+#include "lis3lv02d.h"
+
+#define DRIVER_NAME "lis3lv02d"
+
+/* joystick device poll interval in milliseconds */
+#define MDPS_POLL_INTERVAL 50
+#define MDPS_POLL_MIN 0
+#define MDPS_POLL_MAX 2000
+
+#define LIS3_SYSFS_POWERDOWN_DELAY 5000 /* In milliseconds */
+
+#define SELFTEST_OK 0
+#define SELFTEST_FAIL -1
+#define SELFTEST_IRQ -2
+
+#define IRQ_LINE0 0
+#define IRQ_LINE1 1
+
+/*
+ * The sensor can also generate interrupts (DRDY) but it's pretty pointless
+ * because they are generated even if the data do not change. So it's better
+ * to keep the interrupt for the free-fall event. The values are updated at
+ * 40Hz (at the lowest frequency), but as it can be pretty time consuming on
+ * some low processor, we poll the sensor only at 20Hz... enough for the
+ * joystick.
+ */
+
+#define LIS3_PWRON_DELAY_WAI_12B (5000)
+#define LIS3_PWRON_DELAY_WAI_8B (3000)
+
+/*
+ * LIS3LV02D spec says 1024 LSBs correspond to 1 G -> 1 LSB is 1000/1024 mG
+ * LIS302D spec says: 18 mG / digit
+ * LIS3_ACCURACY is used to increase accuracy of the intermediate
+ * calculation results.
+ */
+#define LIS3_ACCURACY 1024
+/* Sensitivity values for -2G +2G scale */
+#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
+#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
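+/*
+ * Readings are reported as position * scale / LIS3_ACCURACY, so on a
+ * 12-bit sensor a raw value of 1024 becomes 1024 * 1000 / 1024 = 1000 mG,
+ * i.e. exactly 1 G.
+ */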
+
+#define LIS3_DEFAULT_FUZZ_12B 3
+#define LIS3_DEFAULT_FLAT_12B 3
+#define LIS3_DEFAULT_FUZZ_8B 1
+#define LIS3_DEFAULT_FLAT_8B 1
+
+struct lis3lv02d lis3_dev = {
+ .misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
+};
+EXPORT_SYMBOL_GPL(lis3_dev);
+
+/* Just like param_set_int() but with a sanity check so the value cannot
+ * index past the end of the axis array
+ */
+static int param_set_axis(const char *val, const struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ if (!ret) {
+ int val = *(int *)kp->arg;
+ if (val < 0)
+ val = -val;
+ if (!val || val > 3)
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static struct kernel_param_ops param_ops_axis = {
+ .set = param_set_axis,
+ .get = param_get_int,
+};
+
+#define param_check_axis(name, p) param_check_int(name, p)
+
+module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644);
+MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions");
+
+static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg)
+{
+ s8 lo;
+ if (lis3->read(lis3, reg, &lo) < 0)
+ return 0;
+
+ return lo;
+}
+
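+/*
+ * 12-bit values are read as two 8-bit accesses: the low byte lives at
+ * reg - 1 and the high byte at reg, combined little-endian.
+ */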
+static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg)
+{
+ u8 lo, hi;
+
+ lis3->read(lis3, reg - 1, &lo);
+ lis3->read(lis3, reg, &hi);
+ /* In "12 bit right justified" mode, bit 6, bit 7, bit 8 = bit 5 */
+ return (s16)((hi << 8) | lo);
+}
+
+/**
+ * lis3lv02d_get_axis - For the given axis, give the value converted
+ * @axis: 1,2,3 - can also be negative
+ * @hw_values: raw values returned by the hardware
+ *
+ * Returns the converted value.
+ */
+static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
+{
+ if (axis > 0)
+ return hw_values[axis - 1];
+ else
+ return -hw_values[-axis - 1];
+}
+
+/**
+ * lis3lv02d_get_xyz - Get X, Y and Z axis values from the accelerometer
+ * @lis3: pointer to the device struct
+ * @x: where to store the X axis value
+ * @y: where to store the Y axis value
+ * @z: where to store the Z axis value
+ *
+ * Note that a 40 Hz input device can eat up about 10% CPU at 800 MHz
+ */
+static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
+{
+ int position[3];
+ int i;
+
+ if (lis3->blkread) {
+ if (lis3->whoami == WAI_12B) {
+ u16 data[3];
+ lis3->blkread(lis3, OUTX_L, 6, (u8 *)data);
+ for (i = 0; i < 3; i++)
+ position[i] = (s16)le16_to_cpu(data[i]);
+ } else {
+ u8 data[5];
+ /* Data: x, dummy, y, dummy, z */
+ lis3->blkread(lis3, OUTX, 5, data);
+ for (i = 0; i < 3; i++)
+ position[i] = (s8)data[i * 2];
+ }
+ } else {
+ position[0] = lis3->read_data(lis3, OUTX);
+ position[1] = lis3->read_data(lis3, OUTY);
+ position[2] = lis3->read_data(lis3, OUTZ);
+ }
+
+ for (i = 0; i < 3; i++)
+ position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
+
+ *x = lis3lv02d_get_axis(lis3->ac.x, position);
+ *y = lis3lv02d_get_axis(lis3->ac.y, position);
+ *z = lis3lv02d_get_axis(lis3->ac.z, position);
+}
+
+/* conversion between sampling rate and the register values */
+static int lis3_12_rates[4] = {40, 160, 640, 2560};
+static int lis3_8_rates[2] = {100, 400};
+static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
+
+/* ODR is Output Data Rate */
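+/*
+ * The ODR bits in CTRL_REG1 select an entry in the rate table matching
+ * the probed chip; the mask is shifted down to index odrs[] below.
+ */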
+static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+{
+ u8 ctrl;
+ int shift;
+
+ lis3->read(lis3, CTRL_REG1, &ctrl);
+ ctrl &= lis3->odr_mask;
+ shift = ffs(lis3->odr_mask) - 1;
+ return lis3->odrs[(ctrl >> shift)];
+}
+
+static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
+{
+ int div = lis3lv02d_get_odr(lis3);
+
+ if (WARN_ONCE(div == 0, "device returned spurious data"))
+ return -ENXIO;
+
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / div);
+ return 0;
+}
+
+static int lis3lv02d_set_odr(struct lis3lv02d *lis3, int rate)
+{
+ u8 ctrl;
+ int i, len, shift;
+
+ if (!rate)
+ return -EINVAL;
+
+ lis3->read(lis3, CTRL_REG1, &ctrl);
+ ctrl &= ~lis3->odr_mask;
+ len = 1 << hweight_long(lis3->odr_mask); /* # of possible values */
+ shift = ffs(lis3->odr_mask) - 1;
+
+ for (i = 0; i < len; i++)
+ if (lis3->odrs[i] == rate) {
+ lis3->write(lis3, CTRL_REG1,
+ ctrl | (i << shift));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
+{
+ u8 ctlreg, reg;
+ s16 x, y, z;
+ u8 selftest;
+ int ret;
+ u8 ctrl_reg_data;
+ unsigned char irq_cfg;
+
+ mutex_lock(&lis3->mutex);
+
+ irq_cfg = lis3->irq_cfg;
+ if (lis3->whoami == WAI_8B) {
+ lis3->data_ready_count[IRQ_LINE0] = 0;
+ lis3->data_ready_count[IRQ_LINE1] = 0;
+
+ /* Change interrupt cfg to data ready for selftest */
+ atomic_inc(&lis3->wake_thread);
+ lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY;
+ lis3->read(lis3, CTRL_REG3, &ctrl_reg_data);
+ lis3->write(lis3, CTRL_REG3, (ctrl_reg_data &
+ ~(LIS3_IRQ1_MASK | LIS3_IRQ2_MASK)) |
+ (LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY));
+ }
+
+ if (lis3->whoami == WAI_3DC) {
+ ctlreg = CTRL_REG4;
+ selftest = CTRL4_ST0;
+ } else {
+ ctlreg = CTRL_REG1;
+ if (lis3->whoami == WAI_12B)
+ selftest = CTRL1_ST;
+ else
+ selftest = CTRL1_STP;
+ }
+
+ lis3->read(lis3, ctlreg, &reg);
+ lis3->write(lis3, ctlreg, (reg | selftest));
+ ret = lis3lv02d_get_pwron_wait(lis3);
+ if (ret)
+ goto fail;
+
+ /* Read directly to avoid axis remap */
+ x = lis3->read_data(lis3, OUTX);
+ y = lis3->read_data(lis3, OUTY);
+ z = lis3->read_data(lis3, OUTZ);
+
+ /* back to normal settings */
+ lis3->write(lis3, ctlreg, reg);
+ ret = lis3lv02d_get_pwron_wait(lis3);
+ if (ret)
+ goto fail;
+
+ results[0] = x - lis3->read_data(lis3, OUTX);
+ results[1] = y - lis3->read_data(lis3, OUTY);
+ results[2] = z - lis3->read_data(lis3, OUTZ);
+
+ ret = 0;
+
+ if (lis3->whoami == WAI_8B) {
+ /* Restore original interrupt configuration */
+ atomic_dec(&lis3->wake_thread);
+ lis3->write(lis3, CTRL_REG3, ctrl_reg_data);
+ lis3->irq_cfg = irq_cfg;
+
+ if ((irq_cfg & LIS3_IRQ1_MASK) &&
+ lis3->data_ready_count[IRQ_LINE0] < 2) {
+ ret = SELFTEST_IRQ;
+ goto fail;
+ }
+
+ if ((irq_cfg & LIS3_IRQ2_MASK) &&
+ lis3->data_ready_count[IRQ_LINE1] < 2) {
+ ret = SELFTEST_IRQ;
+ goto fail;
+ }
+ }
+
+ if (lis3->pdata) {
+ int i;
+ for (i = 0; i < 3; i++) {
+ /* Check against selftest acceptance limits */
+ if ((results[i] < lis3->pdata->st_min_limits[i]) ||
+ (results[i] > lis3->pdata->st_max_limits[i])) {
+ ret = SELFTEST_FAIL;
+ goto fail;
+ }
+ }
+ }
+
+ /* test passed */
+fail:
+ mutex_unlock(&lis3->mutex);
+ return ret;
+}
+
+/*
+ * The order of registers in the list determines the order of the restore
+ * process. It is a good idea to restore the interrupt enable register
+ * last, after all other configuration.
+ */
+static u8 lis3_wai8_regs[] = { FF_WU_CFG_1, FF_WU_THS_1, FF_WU_DURATION_1,
+ FF_WU_CFG_2, FF_WU_THS_2, FF_WU_DURATION_2,
+ CLICK_CFG, CLICK_SRC, CLICK_THSY_X, CLICK_THSZ,
+ CLICK_TIMELIMIT, CLICK_LATENCY, CLICK_WINDOW,
+ CTRL_REG1, CTRL_REG2, CTRL_REG3};
+
+static u8 lis3_wai12_regs[] = {FF_WU_CFG, FF_WU_THS_L, FF_WU_THS_H,
+ FF_WU_DURATION, DD_CFG, DD_THSI_L, DD_THSI_H,
+ DD_THSE_L, DD_THSE_H,
+ CTRL_REG1, CTRL_REG3, CTRL_REG2};
+
+static inline void lis3_context_save(struct lis3lv02d *lis3)
+{
+ int i;
+ for (i = 0; i < lis3->regs_size; i++)
+ lis3->read(lis3, lis3->regs[i], &lis3->reg_cache[i]);
+ lis3->regs_stored = true;
+}
+
+static inline void lis3_context_restore(struct lis3lv02d *lis3)
+{
+ int i;
+ if (lis3->regs_stored)
+ for (i = 0; i < lis3->regs_size; i++)
+ lis3->write(lis3, lis3->regs[i], lis3->reg_cache[i]);
+}
+
+void lis3lv02d_poweroff(struct lis3lv02d *lis3)
+{
+ if (lis3->reg_ctrl)
+ lis3_context_save(lis3);
+ /* disable X,Y,Z axis and power down */
+ lis3->write(lis3, CTRL_REG1, 0x00);
+ if (lis3->reg_ctrl)
+ lis3->reg_ctrl(lis3, LIS3_REG_OFF);
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
+
+int lis3lv02d_poweron(struct lis3lv02d *lis3)
+{
+ int err;
+ u8 reg;
+
+ lis3->init(lis3);
+
+ /*
+ * Common configuration
+ * BDU: (12 bits sensors only) LSB and MSB values are not updated until
+ * both have been read. So the value read will always be correct.
+ * Set BOOT bit to refresh factory tuning values.
+ */
+ if (lis3->pdata) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ if (lis3->whoami == WAI_12B)
+ reg |= CTRL2_BDU | CTRL2_BOOT;
+ else
+ reg |= CTRL2_BOOT_8B;
+ lis3->write(lis3, CTRL_REG2, reg);
+ }
+
+ err = lis3lv02d_get_pwron_wait(lis3);
+ if (err)
+ return err;
+
+ if (lis3->reg_ctrl)
+ lis3_context_restore(lis3);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+
+
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+ struct lis3lv02d *lis3 = pidev->private;
+ int x, y, z;
+
+ mutex_lock(&lis3->mutex);
+ lis3lv02d_get_xyz(lis3, &x, &y, &z);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
+ mutex_unlock(&lis3->mutex);
+}
+
+static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+{
+ struct lis3lv02d *lis3 = pidev->private;
+
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
+
+ if (lis3->pdata && lis3->whoami == WAI_8B && lis3->idev)
+ atomic_set(&lis3->wake_thread, 1);
+ /*
+ * Update coordinates for the case where poll interval is 0 and
+ * the chip is running purely under interrupt control
+ */
+ lis3lv02d_joystick_poll(pidev);
+}
+
+static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+{
+ struct lis3lv02d *lis3 = pidev->private;
+
+ atomic_set(&lis3->wake_thread, 0);
+ if (lis3->pm_dev)
+ pm_runtime_put(lis3->pm_dev);
+}
+
+static irqreturn_t lis302dl_interrupt(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+
+ if (!test_bit(0, &lis3->misc_opened))
+ goto out;
+
+ /*
+ * Be careful: on some HP laptops the BIOS forces DD (direction
+ * detection) when on battery and the lid is closed. This leads to
+ * interrupts as soon as a little move is done.
+ */
+ atomic_inc(&lis3->count);
+
+ wake_up_interruptible(&lis3->misc_wait);
+ kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
+out:
+ if (atomic_read(&lis3->wake_thread))
+ return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
+}
+
+static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
+{
+ struct input_dev *dev = lis3->idev->input;
+ u8 click_src;
+
+ mutex_lock(&lis3->mutex);
+ lis3->read(lis3, CLICK_SRC, &click_src);
+
+ if (click_src & CLICK_SINGLE_X) {
+ input_report_key(dev, lis3->mapped_btns[0], 1);
+ input_report_key(dev, lis3->mapped_btns[0], 0);
+ }
+
+ if (click_src & CLICK_SINGLE_Y) {
+ input_report_key(dev, lis3->mapped_btns[1], 1);
+ input_report_key(dev, lis3->mapped_btns[1], 0);
+ }
+
+ if (click_src & CLICK_SINGLE_Z) {
+ input_report_key(dev, lis3->mapped_btns[2], 1);
+ input_report_key(dev, lis3->mapped_btns[2], 0);
+ }
+ input_sync(dev);
+ mutex_unlock(&lis3->mutex);
+}
+
+static inline void lis302dl_data_ready(struct lis3lv02d *lis3, int index)
+{
+ int dummy;
+
+ /* Dummy read to ack interrupt */
+ lis3lv02d_get_xyz(lis3, &dummy, &dummy, &dummy);
+ lis3->data_ready_count[index]++;
+}
+
+static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+ u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ1_MASK;
+
+ if (irq_cfg == LIS3_IRQ1_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else if (unlikely(irq_cfg == LIS3_IRQ1_DATA_READY))
+ lis302dl_data_ready(lis3, IRQ_LINE0);
+ else
+ lis3lv02d_joystick_poll(lis3->idev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+ u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ2_MASK;
+
+ if (irq_cfg == LIS3_IRQ2_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else if (unlikely(irq_cfg == LIS3_IRQ2_DATA_READY))
+ lis302dl_data_ready(lis3, IRQ_LINE1);
+ else
+ lis3lv02d_joystick_poll(lis3->idev);
+
+ return IRQ_HANDLED;
+}
+
+static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ if (test_and_set_bit(0, &lis3->misc_opened))
+ return -EBUSY; /* already open */
+
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
+
+ atomic_set(&lis3->count, 0);
+ return 0;
+}
+
+static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ fasync_helper(-1, file, 0, &lis3->async_queue);
+ clear_bit(0, &lis3->misc_opened); /* release the device */
+ if (lis3->pm_dev)
+ pm_runtime_put(lis3->pm_dev);
+ return 0;
+}
+
+static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ DECLARE_WAITQUEUE(wait, current);
+ u32 data;
+ unsigned char byte_data;
+ ssize_t retval = 1;
+
+ if (count < 1)
+ return -EINVAL;
+
+ add_wait_queue(&lis3->misc_wait, &wait);
+ while (true) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ data = atomic_xchg(&lis3->count, 0);
+ if (data)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ goto out;
+ }
+
+ schedule();
+ }
+
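+ /* Report the interrupt count since the last read, saturated at 255 */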
+ if (data < 255)
+ byte_data = data;
+ else
+ byte_data = 255;
+
+ /* make sure we are not going into copy_to_user() with
+ * TASK_INTERRUPTIBLE state */
+ set_current_state(TASK_RUNNING);
+ if (copy_to_user(buf, &byte_data, sizeof(byte_data)))
+ retval = -EFAULT;
+
+out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&lis3->misc_wait, &wait);
+
+ return retval;
+}
+
+static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ poll_wait(file, &lis3->misc_wait, wait);
+ if (atomic_read(&lis3->count))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
+static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ return fasync_helper(fd, file, on, &lis3->async_queue);
+}
+
+static const struct file_operations lis3lv02d_misc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = lis3lv02d_misc_read,
+ .open = lis3lv02d_misc_open,
+ .release = lis3lv02d_misc_release,
+ .poll = lis3lv02d_misc_poll,
+ .fasync = lis3lv02d_misc_fasync,
+};
+
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
+{
+ struct input_dev *input_dev;
+ int err;
+ int max_val, fuzz, flat;
+ int btns[] = {BTN_X, BTN_Y, BTN_Z};
+
+ if (lis3->idev)
+ return -EINVAL;
+
+ lis3->idev = input_allocate_polled_device();
+ if (!lis3->idev)
+ return -ENOMEM;
+
+ lis3->idev->poll = lis3lv02d_joystick_poll;
+ lis3->idev->open = lis3lv02d_joystick_open;
+ lis3->idev->close = lis3lv02d_joystick_close;
+ lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3->idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3->idev->poll_interval_max = MDPS_POLL_MAX;
+ lis3->idev->private = lis3;
+ input_dev = lis3->idev->input;
+
+ input_dev->name = "ST LIS3LV02DL Accelerometer";
+ input_dev->phys = DRIVER_NAME "/input0";
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0;
+ input_dev->dev.parent = &lis3->pdev->dev;
+
+ set_bit(EV_ABS, input_dev->evbit);
+ max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
+ if (lis3->whoami == WAI_12B) {
+ fuzz = LIS3_DEFAULT_FUZZ_12B;
+ flat = LIS3_DEFAULT_FLAT_12B;
+ } else {
+ fuzz = LIS3_DEFAULT_FUZZ_8B;
+ flat = LIS3_DEFAULT_FLAT_8B;
+ }
+ fuzz = (fuzz * lis3->scale) / LIS3_ACCURACY;
+ flat = (flat * lis3->scale) / LIS3_ACCURACY;
+
+ input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+
+ lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
+ lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
+ lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
+
+ err = input_register_polled_device(lis3->idev);
+ if (err) {
+ input_free_polled_device(lis3->idev);
+ lis3->idev = NULL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
+
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
+{
+ if (lis3->irq)
+ free_irq(lis3->irq, lis3);
+ if (lis3->pdata && lis3->pdata->irq2)
+ free_irq(lis3->pdata->irq2, lis3);
+
+ if (!lis3->idev)
+ return;
+
+ if (lis3->irq)
+ misc_deregister(&lis3->miscdev);
+ input_unregister_polled_device(lis3->idev);
+ input_free_polled_device(lis3->idev);
+ lis3->idev = NULL;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
+
+/* Sysfs stuff */
+static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
+{
+ /*
+ * Sysfs functions are fast visitors, so issue the put call
+ * immediately after the get call. However, keep the chip
+ * running for a while and schedule a delayed suspend. This
+ * way periodic sysfs calls don't suffer from the relatively
+ * long power-up time.
+ */
+
+ if (lis3->pm_dev) {
+ pm_runtime_get_sync(lis3->pm_dev);
+ pm_runtime_put_noidle(lis3->pm_dev);
+ pm_schedule_suspend(lis3->pm_dev, LIS3_SYSFS_POWERDOWN_DELAY);
+ }
+}
+
+static ssize_t lis3lv02d_selftest_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+ s16 values[3];
+
+ static const char ok[] = "OK";
+ static const char fail[] = "FAIL";
+ static const char irq[] = "FAIL_IRQ";
+ const char *res;
+
+ lis3lv02d_sysfs_poweron(lis3);
+ switch (lis3lv02d_selftest(lis3, values)) {
+ case SELFTEST_FAIL:
+ res = fail;
+ break;
+ case SELFTEST_IRQ:
+ res = irq;
+ break;
+ case SELFTEST_OK:
+ default:
+ res = ok;
+ break;
+ }
+ return sprintf(buf, "%s %d %d %d\n", res,
+ values[0], values[1], values[2]);
+}
+
+static ssize_t lis3lv02d_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+ int x, y, z;
+
+ lis3lv02d_sysfs_poweron(lis3);
+ mutex_lock(&lis3->mutex);
+ lis3lv02d_get_xyz(lis3, &x, &y, &z);
+ mutex_unlock(&lis3->mutex);
+ return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
+}
+
+static ssize_t lis3lv02d_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+
+ lis3lv02d_sysfs_poweron(lis3);
+ return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
+}
+
+static ssize_t lis3lv02d_rate_set(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+ unsigned long rate;
+
+ if (strict_strtoul(buf, 0, &rate))
+ return -EINVAL;
+
+ lis3lv02d_sysfs_poweron(lis3);
+ if (lis3lv02d_set_odr(lis3, rate))
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL);
+static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
+static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
+ lis3lv02d_rate_set);
+
+static struct attribute *lis3lv02d_attributes[] = {
+ &dev_attr_selftest.attr,
+ &dev_attr_position.attr,
+ &dev_attr_rate.attr,
+ NULL
+};
+
+static struct attribute_group lis3lv02d_attribute_group = {
+ .attrs = lis3lv02d_attributes
+};
+
+
+static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
+{
+ lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(lis3->pdev))
+ return PTR_ERR(lis3->pdev);
+
+ platform_set_drvdata(lis3->pdev, lis3);
+ return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
+}
+
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
+{
+ sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
+ platform_device_unregister(lis3->pdev);
+ if (lis3->pm_dev) {
+ /* Barrier after the sysfs remove */
+ pm_runtime_barrier(lis3->pm_dev);
+
+ /* SYSFS may have left chip running. Turn off if necessary */
+ if (!pm_runtime_suspended(lis3->pm_dev))
+ lis3lv02d_poweroff(lis3);
+
+ pm_runtime_disable(lis3->pm_dev);
+ pm_runtime_set_suspended(lis3->pm_dev);
+ }
+ kfree(lis3->reg_cache);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
+
+static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
+ struct lis3lv02d_platform_data *p)
+{
+ int err;
+ int ctrl2 = p->hipass_ctrl;
+
+ if (p->click_flags) {
+ lis3->write(lis3, CLICK_CFG, p->click_flags);
+ lis3->write(lis3, CLICK_TIMELIMIT, p->click_time_limit);
+ lis3->write(lis3, CLICK_LATENCY, p->click_latency);
+ lis3->write(lis3, CLICK_WINDOW, p->click_window);
+ lis3->write(lis3, CLICK_THSZ, p->click_thresh_z & 0xf);
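+ /* CLICK_THSY_X packs the X threshold in its low nibble and
+ * the Y threshold in its high nibble */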
+ lis3->write(lis3, CLICK_THSY_X,
+ (p->click_thresh_x & 0xf) |
+ (p->click_thresh_y << 4));
+
+ if (lis3->idev) {
+ struct input_dev *input_dev = lis3->idev->input;
+ input_set_capability(input_dev, EV_KEY, BTN_X);
+ input_set_capability(input_dev, EV_KEY, BTN_Y);
+ input_set_capability(input_dev, EV_KEY, BTN_Z);
+ }
+ }
+
+ if (p->wakeup_flags) {
+ lis3->write(lis3, FF_WU_CFG_1, p->wakeup_flags);
+ lis3->write(lis3, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+ /* pdata value + 1 to keep this backward compatible */
+ lis3->write(lis3, FF_WU_DURATION_1, p->duration1 + 1);
+ ctrl2 ^= HP_FF_WU1; /* XOR to keep compatible with old pdata */
+ }
+
+ if (p->wakeup_flags2) {
+ lis3->write(lis3, FF_WU_CFG_2, p->wakeup_flags2);
+ lis3->write(lis3, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+ /* pdata value + 1 to keep this backward compatible */
+ lis3->write(lis3, FF_WU_DURATION_2, p->duration2 + 1);
+ ctrl2 ^= HP_FF_WU2; /* XOR to keep compatible with old pdata */
+ }
+ /* Configure hipass filters */
+ lis3->write(lis3, CTRL_REG2, ctrl2);
+
+ if (p->irq2) {
+ err = request_threaded_irq(p->irq2,
+ NULL,
+ lis302dl_interrupt_thread2_8b,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT |
+ (p->irq_flags2 & IRQF_TRIGGER_MASK),
+ DRIVER_NAME, lis3);
+ if (err < 0)
+ pr_err("No second IRQ. Limited functionality\n");
+ }
+}
+
+/*
+ * Initialise the accelerometer and the various subsystems.
+ * Should be rather independent of the bus system.
+ */
+int lis3lv02d_init_device(struct lis3lv02d *lis3)
+{
+ int err;
+ irq_handler_t thread_fn;
+ int irq_flags = 0;
+
+ lis3->whoami = lis3lv02d_read_8(lis3, WHO_AM_I);
+
+ switch (lis3->whoami) {
+ case WAI_12B:
+ pr_info("12 bits sensor found\n");
+ lis3->read_data = lis3lv02d_read_12;
+ lis3->mdps_max_val = 2048;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
+ lis3->odrs = lis3_12_rates;
+ lis3->odr_mask = CTRL1_DF0 | CTRL1_DF1;
+ lis3->scale = LIS3_SENSITIVITY_12B;
+ lis3->regs = lis3_wai12_regs;
+ lis3->regs_size = ARRAY_SIZE(lis3_wai12_regs);
+ break;
+ case WAI_8B:
+ pr_info("8 bits sensor found\n");
+ lis3->read_data = lis3lv02d_read_8;
+ lis3->mdps_max_val = 128;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_8_rates;
+ lis3->odr_mask = CTRL1_DR;
+ lis3->scale = LIS3_SENSITIVITY_8B;
+ lis3->regs = lis3_wai8_regs;
+ lis3->regs_size = ARRAY_SIZE(lis3_wai8_regs);
+ break;
+ case WAI_3DC:
+ pr_info("8 bits 3DC sensor found\n");
+ lis3->read_data = lis3lv02d_read_8;
+ lis3->mdps_max_val = 128;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_3dc_rates;
+ lis3->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
+ lis3->scale = LIS3_SENSITIVITY_8B;
+ break;
+ default:
+ pr_err("unknown sensor type 0x%X\n", lis3->whoami);
+ return -EINVAL;
+ }
+
+ lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
+ sizeof(lis3_wai12_regs)), GFP_KERNEL);
+
+ if (lis3->reg_cache == NULL) {
+ printk(KERN_ERR DRIVER_NAME ": out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&lis3->mutex);
+ atomic_set(&lis3->wake_thread, 0);
+
+ lis3lv02d_add_fs(lis3);
+ err = lis3lv02d_poweron(lis3);
+ if (err) {
+ lis3lv02d_remove_fs(lis3);
+ return err;
+ }
+
+ if (lis3->pm_dev) {
+ pm_runtime_set_active(lis3->pm_dev);
+ pm_runtime_enable(lis3->pm_dev);
+ }
+
+ if (lis3lv02d_joystick_enable(lis3))
+ pr_err("joystick initialization failed\n");
+
+ /* passing in platform specific data is purely optional and only
+ * used by the SPI transport layer at the moment */
+ if (lis3->pdata) {
+ struct lis3lv02d_platform_data *p = lis3->pdata;
+
+ if (lis3->whoami == WAI_8B)
+ lis3lv02d_8b_configure(lis3, p);
+
+ irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK;
+
+ lis3->irq_cfg = p->irq_cfg;
+ if (p->irq_cfg)
+ lis3->write(lis3, CTRL_REG3, p->irq_cfg);
+
+ if (p->default_rate)
+ lis3lv02d_set_odr(lis3, p->default_rate);
+ }
+
+ /* bail if we did not get an IRQ from the bus layer */
+ if (!lis3->irq) {
+ pr_debug("No IRQ. Disabling /dev/freefall\n");
+ goto out;
+ }
+
+ /*
+ * The sensor can generate interrupts for free-fall and direction
+ * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
+ * things simple and _fast_ we activate it only for free-fall, so there
+ * is no need to read a register (very slow with ACPI). For the same reason,
+ * we forbid shared interrupts.
+ *
+ * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
+ * io-apic is not configurable (and generates a warning) but I keep it
+ * in case of support for other hardware.
+ */
+ if (lis3->pdata && lis3->whoami == WAI_8B)
+ thread_fn = lis302dl_interrupt_thread1_8b;
+ else
+ thread_fn = NULL;
+
+ err = request_threaded_irq(lis3->irq, lis302dl_interrupt,
+ thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT |
+ irq_flags,
+ DRIVER_NAME, lis3);
+
+ if (err < 0) {
+ pr_err("Cannot get IRQ\n");
+ goto out;
+ }
+
+ lis3->miscdev.minor = MISC_DYNAMIC_MINOR;
+ lis3->miscdev.name = "freefall";
+ lis3->miscdev.fops = &lis3lv02d_misc_fops;
+
+ if (misc_register(&lis3->miscdev))
+ pr_err("misc_register failed\n");
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
+
+MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
+MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
new file mode 100644
index 00000000..2b1482ad
--- /dev/null
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -0,0 +1,294 @@
+/*
+ * lis3lv02d.h - ST LIS3LV02DL accelerometer driver
+ *
+ * Copyright (C) 2007-2008 Yan Burman
+ * Copyright (C) 2008-2009 Eric Piel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/platform_device.h>
+#include <linux/input-polldev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/miscdevice.h>
+
+/*
+ * This driver tries to support the "digital" accelerometer chips from
+ * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL,
+ * LIS35DE, or LIS202DL. They are very similar in terms of programming, with
+ * almost the same registers. In addition to differing in physical properties,
+ * they differ in the number of axes (2/3), precision (8/12 bits), and special
+ * features (freefall detection, click...). Unfortunately, not all the
+ * differences can be probed via a register.
+ * They can be connected either via I²C or SPI.
+ */
+
+#include <linux/lis3lv02d.h>
+
+enum lis3_reg {
+ WHO_AM_I = 0x0F,
+ OFFSET_X = 0x16,
+ OFFSET_Y = 0x17,
+ OFFSET_Z = 0x18,
+ GAIN_X = 0x19,
+ GAIN_Y = 0x1A,
+ GAIN_Z = 0x1B,
+ CTRL_REG1 = 0x20,
+ CTRL_REG2 = 0x21,
+ CTRL_REG3 = 0x22,
+ CTRL_REG4 = 0x23,
+ HP_FILTER_RESET = 0x23,
+ STATUS_REG = 0x27,
+ OUTX_L = 0x28,
+ OUTX_H = 0x29,
+ OUTX = 0x29,
+ OUTY_L = 0x2A,
+ OUTY_H = 0x2B,
+ OUTY = 0x2B,
+ OUTZ_L = 0x2C,
+ OUTZ_H = 0x2D,
+ OUTZ = 0x2D,
+};
+
+enum lis302d_reg {
+ FF_WU_CFG_1 = 0x30,
+ FF_WU_SRC_1 = 0x31,
+ FF_WU_THS_1 = 0x32,
+ FF_WU_DURATION_1 = 0x33,
+ FF_WU_CFG_2 = 0x34,
+ FF_WU_SRC_2 = 0x35,
+ FF_WU_THS_2 = 0x36,
+ FF_WU_DURATION_2 = 0x37,
+ CLICK_CFG = 0x38,
+ CLICK_SRC = 0x39,
+ CLICK_THSY_X = 0x3B,
+ CLICK_THSZ = 0x3C,
+ CLICK_TIMELIMIT = 0x3D,
+ CLICK_LATENCY = 0x3E,
+ CLICK_WINDOW = 0x3F,
+};
+
+enum lis3lv02d_reg {
+ FF_WU_CFG = 0x30,
+ FF_WU_SRC = 0x31,
+ FF_WU_ACK = 0x32,
+ FF_WU_THS_L = 0x34,
+ FF_WU_THS_H = 0x35,
+ FF_WU_DURATION = 0x36,
+ DD_CFG = 0x38,
+ DD_SRC = 0x39,
+ DD_ACK = 0x3A,
+ DD_THSI_L = 0x3C,
+ DD_THSI_H = 0x3D,
+ DD_THSE_L = 0x3E,
+ DD_THSE_H = 0x3F,
+};
+
+enum lis3_who_am_i {
+ WAI_3DC = 0x33, /* 8 bits: LIS3DC, HP3DC */
+ WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
+ WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
+ WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
+};
+
+enum lis3lv02d_ctrl1_12b {
+ CTRL1_Xen = 0x01,
+ CTRL1_Yen = 0x02,
+ CTRL1_Zen = 0x04,
+ CTRL1_ST = 0x08,
+ CTRL1_DF0 = 0x10,
+ CTRL1_DF1 = 0x20,
+ CTRL1_PD0 = 0x40,
+ CTRL1_PD1 = 0x80,
+};
+
+/* Delta to ctrl1_12b version */
+enum lis3lv02d_ctrl1_8b {
+ CTRL1_STM = 0x08,
+ CTRL1_STP = 0x10,
+ CTRL1_FS = 0x20,
+ CTRL1_PD = 0x40,
+ CTRL1_DR = 0x80,
+};
+
+enum lis3lv02d_ctrl1_3dc {
+ CTRL1_ODR0 = 0x10,
+ CTRL1_ODR1 = 0x20,
+ CTRL1_ODR2 = 0x40,
+ CTRL1_ODR3 = 0x80,
+};
+
+enum lis3lv02d_ctrl2 {
+ CTRL2_DAS = 0x01,
+ CTRL2_SIM = 0x02,
+ CTRL2_DRDY = 0x04,
+ CTRL2_IEN = 0x08,
+ CTRL2_BOOT = 0x10,
+ CTRL2_BLE = 0x20,
+ CTRL2_BDU = 0x40, /* Block Data Update */
+ CTRL2_FS = 0x80, /* Full Scale selection */
+};
+
+enum lis3lv02d_ctrl4_3dc {
+ CTRL4_SIM = 0x01,
+ CTRL4_ST0 = 0x02,
+ CTRL4_ST1 = 0x04,
+ CTRL4_FS0 = 0x10,
+ CTRL4_FS1 = 0x20,
+};
+
+enum lis302d_ctrl2 {
+ HP_FF_WU2 = 0x08,
+ HP_FF_WU1 = 0x04,
+ CTRL2_BOOT_8B = 0x40,
+};
+
+enum lis3lv02d_ctrl3 {
+ CTRL3_CFS0 = 0x01,
+ CTRL3_CFS1 = 0x02,
+ CTRL3_FDS = 0x10,
+ CTRL3_HPFF = 0x20,
+ CTRL3_HPDD = 0x40,
+ CTRL3_ECK = 0x80,
+};
+
+enum lis3lv02d_status_reg {
+ STATUS_XDA = 0x01,
+ STATUS_YDA = 0x02,
+ STATUS_ZDA = 0x04,
+ STATUS_XYZDA = 0x08,
+ STATUS_XOR = 0x10,
+ STATUS_YOR = 0x20,
+ STATUS_ZOR = 0x40,
+ STATUS_XYZOR = 0x80,
+};
+
+enum lis3lv02d_ff_wu_cfg {
+ FF_WU_CFG_XLIE = 0x01,
+ FF_WU_CFG_XHIE = 0x02,
+ FF_WU_CFG_YLIE = 0x04,
+ FF_WU_CFG_YHIE = 0x08,
+ FF_WU_CFG_ZLIE = 0x10,
+ FF_WU_CFG_ZHIE = 0x20,
+ FF_WU_CFG_LIR = 0x40,
+ FF_WU_CFG_AOI = 0x80,
+};
+
+enum lis3lv02d_ff_wu_src {
+ FF_WU_SRC_XL = 0x01,
+ FF_WU_SRC_XH = 0x02,
+ FF_WU_SRC_YL = 0x04,
+ FF_WU_SRC_YH = 0x08,
+ FF_WU_SRC_ZL = 0x10,
+ FF_WU_SRC_ZH = 0x20,
+ FF_WU_SRC_IA = 0x40,
+};
+
+enum lis3lv02d_dd_cfg {
+ DD_CFG_XLIE = 0x01,
+ DD_CFG_XHIE = 0x02,
+ DD_CFG_YLIE = 0x04,
+ DD_CFG_YHIE = 0x08,
+ DD_CFG_ZLIE = 0x10,
+ DD_CFG_ZHIE = 0x20,
+ DD_CFG_LIR = 0x40,
+ DD_CFG_IEND = 0x80,
+};
+
+enum lis3lv02d_dd_src {
+ DD_SRC_XL = 0x01,
+ DD_SRC_XH = 0x02,
+ DD_SRC_YL = 0x04,
+ DD_SRC_YH = 0x08,
+ DD_SRC_ZL = 0x10,
+ DD_SRC_ZH = 0x20,
+ DD_SRC_IA = 0x40,
+};
+
+enum lis3lv02d_click_src_8b {
+ CLICK_SINGLE_X = 0x01,
+ CLICK_DOUBLE_X = 0x02,
+ CLICK_SINGLE_Y = 0x04,
+ CLICK_DOUBLE_Y = 0x08,
+ CLICK_SINGLE_Z = 0x10,
+ CLICK_DOUBLE_Z = 0x20,
+ CLICK_IA = 0x40,
+};
+
+enum lis3lv02d_reg_state {
+ LIS3_REG_OFF = 0x00,
+ LIS3_REG_ON = 0x01,
+};
+
+union axis_conversion {
+ struct {
+ int x, y, z;
+ };
+ int as_array[3];
+
+};
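+
+/*
+ * Illustrative: with the default map { .as_array = { 1, 2, 3 } } the
+ * logical axes equal the hardware axes; a board mounted with X and Y
+ * swapped and the new X inverted would use { .as_array = { -2, 1, 3 } },
+ * assuming the core (lis3lv02d.c) treats a negative entry as an inverted
+ * hardware axis.
+ */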
+
+struct lis3lv02d {
+ void *bus_priv; /* used by the bus layer only */
+ struct device *pm_dev; /* for pm_runtime purposes */
+ int (*init) (struct lis3lv02d *lis3);
+ int (*write) (struct lis3lv02d *lis3, int reg, u8 val);
+ int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret);
+ int (*blkread) (struct lis3lv02d *lis3, int reg, int len, u8 *ret);
+ int (*reg_ctrl) (struct lis3lv02d *lis3, bool state);
+
+ int *odrs; /* Supported output data rates */
+ u8 *regs; /* Regs to store / restore */
+ int regs_size;
+ u8 *reg_cache;
+ bool regs_stored;
+ u8 odr_mask; /* ODR bit mask */
+ u8 whoami; /* indicates measurement precision */
+ s16 (*read_data) (struct lis3lv02d *lis3, int reg);
+ int mdps_max_val;
+ int pwron_delay;
+ int scale; /*
+ * relationship between 1 LSB and mG
+ * (1/1000th of earth gravity)
+ */
+
+ struct input_polled_dev *idev; /* input device */
+ struct platform_device *pdev; /* platform device */
+ struct regulator_bulk_data regulators[2];
+ atomic_t count; /* interrupt count after last read */
+ union axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
+
+ u32 irq; /* IRQ number */
+ struct fasync_struct *async_queue; /* queue for the misc device */
+ wait_queue_head_t misc_wait; /* Wait queue for the misc device */
+ unsigned long misc_opened; /* bit0: whether the device is open */
+ struct miscdevice miscdev;
+
+ int data_ready_count[2];
+ atomic_t wake_thread;
+ unsigned char irq_cfg;
+
+ struct lis3lv02d_platform_data *pdata; /* for passing board config */
+ struct mutex mutex; /* Serialize poll and selftest */
+};
+
+int lis3lv02d_init_device(struct lis3lv02d *lis3);
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3);
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3);
+void lis3lv02d_poweroff(struct lis3lv02d *lis3);
+int lis3lv02d_poweron(struct lis3lv02d *lis3);
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
+
+extern struct lis3lv02d lis3_dev;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
new file mode 100644
index 00000000..e8c0019d
--- /dev/null
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -0,0 +1,263 @@
+/*
+ * drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+ *
+ * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
+ * Driver is based on corresponding SPI driver written by Daniel Mack
+ * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
+ *
+ * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include "lis3lv02d.h"
+
+#define DRV_NAME "lis3lv02d_i2c"
+
+static const char reg_vdd[] = "Vdd";
+static const char reg_vdd_io[] = "Vdd_IO";
+
+static int lis3_reg_ctrl(struct lis3lv02d *lis3, bool state)
+{
+ int ret;
+ if (state == LIS3_REG_OFF) {
+ ret = regulator_bulk_disable(ARRAY_SIZE(lis3->regulators),
+ lis3->regulators);
+ } else {
+ ret = regulator_bulk_enable(ARRAY_SIZE(lis3->regulators),
+ lis3->regulators);
+ /* The chip needs time to wake up; not mentioned in the datasheet */
+ usleep_range(10000, 20000);
+ }
+ return ret;
+}
+
+static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ return i2c_smbus_write_byte_data(c, reg, value);
+}
+
+static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ *v = i2c_smbus_read_byte_data(c, reg);
+ return 0;
+}
+
+static inline s32 lis3_i2c_blockread(struct lis3lv02d *lis3, int reg, int len,
+ u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ reg |= (1 << 7); /* bit 7 enables register address auto-increment */
+ return i2c_smbus_read_i2c_block_data(c, reg, len, v);
+}
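+
+/*
+ * Illustrative: with auto-increment set, the six output registers of a
+ * 12-bit chip can be fetched in a single transfer (a sketch; the real
+ * callers live in lis3lv02d.c):
+ *
+ *     u8 data[6];
+ *
+ *     lis3->blkread(lis3, OUTX_L, sizeof(data), data);
+ */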
+
+static int lis3_i2c_init(struct lis3lv02d *lis3)
+{
+ u8 reg;
+ int ret;
+
+ lis3_reg_ctrl(lis3, LIS3_REG_ON);
+
+ lis3->read(lis3, WHO_AM_I, &reg);
+ if (reg != lis3->whoami)
+ printk(KERN_ERR "lis3: power on failure\n");
+
+ /* power up the device */
+ ret = lis3->read(lis3, CTRL_REG1, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+ return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+/* Default axis mapping but it can be overwritten by platform data */
+static union axis_conversion lis3lv02d_axis_map =
+ { .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
+
+static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata) {
+ if ((pdata->driver_features & LIS3_USE_BLOCK_READ) &&
+ (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)))
+ lis3_dev.blkread = lis3_i2c_blockread;
+
+ if (pdata->axis_x)
+ lis3lv02d_axis_map.x = pdata->axis_x;
+
+ if (pdata->axis_y)
+ lis3lv02d_axis_map.y = pdata->axis_y;
+
+ if (pdata->axis_z)
+ lis3lv02d_axis_map.z = pdata->axis_z;
+
+ if (pdata->setup_resources)
+ ret = pdata->setup_resources();
+
+ if (ret)
+ goto fail;
+ }
+
+ lis3_dev.regulators[0].supply = reg_vdd;
+ lis3_dev.regulators[1].supply = reg_vdd_io;
+ ret = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(lis3_dev.regulators),
+ lis3_dev.regulators);
+ if (ret < 0)
+ goto fail;
+
+ lis3_dev.pdata = pdata;
+ lis3_dev.bus_priv = client;
+ lis3_dev.init = lis3_i2c_init;
+ lis3_dev.read = lis3_i2c_read;
+ lis3_dev.write = lis3_i2c_write;
+ lis3_dev.irq = client->irq;
+ lis3_dev.ac = lis3lv02d_axis_map;
+ lis3_dev.pm_dev = &client->dev;
+
+ i2c_set_clientdata(client, &lis3_dev);
+
+ /* Provide power over the init call */
+ lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);
+
+ ret = lis3lv02d_init_device(&lis3_dev);
+
+ lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);
+
+ if (ret)
+ goto fail2;
+ return 0;
+
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(lis3_dev.regulators),
+ lis3_dev.regulators);
+fail:
+ if (pdata && pdata->release_resources)
+ pdata->release_resources();
+ return ret;
+}
+
+static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata && pdata->release_resources)
+ pdata->release_resources();
+
+ lis3lv02d_joystick_disable(lis3);
+ lis3lv02d_remove_fs(&lis3_dev);
+
+ regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
+ lis3_dev.regulators);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(lis3);
+ return 0;
+}
+
+static int lis3lv02d_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ /*
+ * The runtime PM documentation says that devices should always
+ * be powered on at resume. Runtime PM turns them off again once
+ * the system-wide resume has completed.
+ */
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
+ pm_runtime_suspended(dev))
+ lis3lv02d_poweron(lis3);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int lis3_i2c_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ lis3lv02d_poweroff(lis3);
+ return 0;
+}
+
+static int lis3_i2c_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ lis3lv02d_poweron(lis3);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct i2c_device_id lis3lv02d_id[] = {
+ {"lis3lv02d", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
+
+static const struct dev_pm_ops lis3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lis3lv02d_i2c_suspend,
+ lis3lv02d_i2c_resume)
+ SET_RUNTIME_PM_OPS(lis3_i2c_runtime_suspend,
+ lis3_i2c_runtime_resume,
+ NULL)
+};
+
+static struct i2c_driver lis3lv02d_i2c_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &lis3_pm_ops,
+ },
+ .probe = lis3lv02d_i2c_probe,
+ .remove = __devexit_p(lis3lv02d_i2c_remove),
+ .id_table = lis3lv02d_id,
+};
+
+module_i2c_driver(lis3lv02d_i2c_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("lis3lv02d I2C interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
new file mode 100644
index 00000000..80880e98
--- /dev/null
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -0,0 +1,134 @@
+/*
+ * lis3lv02d_spi - SPI glue layer for lis3lv02d
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/pm.h>
+
+#include "lis3lv02d.h"
+
+#define DRV_NAME "lis3lv02d_spi"
+#define LIS3_SPI_READ 0x80
+
+static int lis3_spi_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+ struct spi_device *spi = lis3->bus_priv;
+ int ret = spi_w8r8(spi, reg | LIS3_SPI_READ);
+ if (ret < 0)
+ return -EINVAL;
+
+ *v = (u8) ret;
+ return 0;
+}
+
+static int lis3_spi_write(struct lis3lv02d *lis3, int reg, u8 val)
+{
+ u8 tmp[2] = { reg, val };
+ struct spi_device *spi = lis3->bus_priv;
+ return spi_write(spi, tmp, sizeof(tmp));
+}
+
+static int lis3_spi_init(struct lis3lv02d *lis3)
+{
+ u8 reg;
+ int ret;
+
+ /* power up the device */
+ ret = lis3->read(lis3, CTRL_REG1, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+ return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+static union axis_conversion lis3lv02d_axis_normal =
+ { .as_array = { 1, 2, 3 } };
+
+static int __devinit lis302dl_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ lis3_dev.bus_priv = spi;
+ lis3_dev.init = lis3_spi_init;
+ lis3_dev.read = lis3_spi_read;
+ lis3_dev.write = lis3_spi_write;
+ lis3_dev.irq = spi->irq;
+ lis3_dev.ac = lis3lv02d_axis_normal;
+ lis3_dev.pdata = spi->dev.platform_data;
+ spi_set_drvdata(spi, &lis3_dev);
+
+ return lis3lv02d_init_device(&lis3_dev);
+}
+
+static int __devexit lis302dl_spi_remove(struct spi_device *spi)
+{
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+ lis3lv02d_joystick_disable(lis3);
+ lis3lv02d_poweroff(lis3);
+
+ return lis3lv02d_remove_fs(&lis3_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(&lis3_dev);
+
+ return 0;
+}
+
+static int lis3lv02d_spi_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweron(lis3);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+ lis3lv02d_spi_resume);
+
+static struct spi_driver lis302dl_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &lis3lv02d_spi_pm,
+ },
+ .probe = lis302dl_spi_probe,
+ .remove = __devexit_p(lis302dl_spi_remove),
+};
+
+module_spi_driver(lis302dl_spi_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("lis3lv02d SPI glue layer");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
new file mode 100644
index 00000000..28adefe7
--- /dev/null
+++ b/drivers/misc/lkdtm.c
@@ -0,0 +1,695 @@
+/*
+ * Kprobe module for testing crash dumps
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * This module induces system failures at predefined crashpoints to
+ * evaluate the reliability of crash dumps obtained using different dumping
+ * solutions.
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
+ */
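+
+/*
+ * Typical usage, assuming debugfs is mounted at /sys/kernel/debug
+ * (a sketch; see provoke-crashes.txt for the authoritative docs):
+ *
+ *     # arm a crash point: panic on the 10th entry into do_IRQ()
+ *     insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=PANIC \
+ *             cpoint_count=10
+ *
+ *     # or trigger a crash immediately through debugfs
+ *     echo PANIC > /sys/kernel/debug/provoke-crash/DIRECT
+ */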
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_IDE
+#include <linux/ide.h>
+#endif
+
+#define DEFAULT_COUNT 10
+#define REC_NUM_DEFAULT 10
+
+enum cname {
+ CN_INVALID,
+ CN_INT_HARDWARE_ENTRY,
+ CN_INT_HW_IRQ_EN,
+ CN_INT_TASKLET_ENTRY,
+ CN_FS_DEVRW,
+ CN_MEM_SWAPOUT,
+ CN_TIMERADD,
+ CN_SCSI_DISPATCH_CMD,
+ CN_IDE_CORE_CP,
+ CN_DIRECT,
+};
+
+enum ctype {
+ CT_NONE,
+ CT_PANIC,
+ CT_BUG,
+ CT_EXCEPTION,
+ CT_LOOP,
+ CT_OVERFLOW,
+ CT_CORRUPT_STACK,
+ CT_UNALIGNED_LOAD_STORE_WRITE,
+ CT_OVERWRITE_ALLOCATION,
+ CT_WRITE_AFTER_FREE,
+ CT_SOFTLOCKUP,
+ CT_HARDLOCKUP,
+ CT_HUNG_TASK,
+};
+
+static char* cp_name[] = {
+ "INT_HARDWARE_ENTRY",
+ "INT_HW_IRQ_EN",
+ "INT_TASKLET_ENTRY",
+ "FS_DEVRW",
+ "MEM_SWAPOUT",
+ "TIMERADD",
+ "SCSI_DISPATCH_CMD",
+ "IDE_CORE_CP",
+ "DIRECT",
+};
+
+static char* cp_type[] = {
+ "PANIC",
+ "BUG",
+ "EXCEPTION",
+ "LOOP",
+ "OVERFLOW",
+ "CORRUPT_STACK",
+ "UNALIGNED_LOAD_STORE_WRITE",
+ "OVERWRITE_ALLOCATION",
+ "WRITE_AFTER_FREE",
+ "SOFTLOCKUP",
+ "HARDLOCKUP",
+ "HUNG_TASK",
+};
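+
+/*
+ * Both string tables are offset by one from their enums: index 0 of
+ * cp_name corresponds to CN_INT_HARDWARE_ENTRY, not CN_INVALID, which is
+ * why the lookup helpers below add or subtract 1.
+ */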
+
+static struct jprobe lkdtm;
+
+static int lkdtm_parse_commandline(void);
+static void lkdtm_handler(void);
+
+static char* cpoint_name;
+static char* cpoint_type;
+static int cpoint_count = DEFAULT_COUNT;
+static int recur_count = REC_NUM_DEFAULT;
+
+static enum cname cpoint = CN_INVALID;
+static enum ctype cptype = CT_NONE;
+static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
+
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
+ "default is 10");
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash point at which the kernel is to be crashed");
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
+static unsigned int jp_do_irq(unsigned int irq)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+ struct irqaction *action)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static void jp_tasklet_action(struct softirq_action *a)
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+struct scan_control;
+
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+ struct zone *zone,
+ struct scan_control *sc)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+#ifdef CONFIG_IDE
+int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+ struct block_device *bdev, unsigned int cmd,
+ unsigned long arg)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+#endif
+
+/* Return the crash type, or CT_NONE if the name is invalid */
+static enum ctype parse_cp_type(const char *what, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
+ if (!strcmp(what, cp_type[i]))
+ return i + 1;
+ }
+
+ return CT_NONE;
+}
+
+static const char *cp_type_to_str(enum ctype type)
+{
+ if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
+ return "None";
+
+ return cp_type[type - 1];
+}
+
+static const char *cp_name_to_str(enum cname name)
+{
+ if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
+ return "INVALID";
+
+ return cp_name[name - 1];
+}
+
+
+static int lkdtm_parse_commandline(void)
+{
+ int i;
+ unsigned long flags;
+
+ if (cpoint_count < 1 || recur_count < 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&count_lock, flags);
+ count = cpoint_count;
+ spin_unlock_irqrestore(&count_lock, flags);
+
+ /* No special parameters */
+ if (!cpoint_type && !cpoint_name)
+ return 0;
+
+ /* cpoint_type and cpoint_name must be set together or not at all */
+ if (!cpoint_type || !cpoint_name)
+ return -EINVAL;
+
+ cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
+ if (cptype == CT_NONE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
+ if (!strcmp(cpoint_name, cp_name[i])) {
+ cpoint = i + 1;
+ return 0;
+ }
+ }
+
+ /* Could not find a valid crash point */
+ return -EINVAL;
+}
+
+static int recursive_loop(int a)
+{
+ char buf[1024];
+
+ memset(buf,0xFF,1024);
+ recur_count--;
+ if (!recur_count)
+ return 0;
+ else
+ return recursive_loop(a);
+}
+
+static void lkdtm_do_action(enum ctype which)
+{
+ switch (which) {
+ case CT_PANIC:
+ panic("dumptest");
+ break;
+ case CT_BUG:
+ BUG();
+ break;
+ case CT_EXCEPTION:
+ *((int *) 0) = 0;
+ break;
+ case CT_LOOP:
+ for (;;)
+ ;
+ break;
+ case CT_OVERFLOW:
+ (void) recursive_loop(0);
+ break;
+ case CT_CORRUPT_STACK: {
+ volatile u32 data[8];
+ volatile u32 *p = data;
+
+ p[12] = 0x12345678;
+ break;
+ }
+ case CT_UNALIGNED_LOAD_STORE_WRITE: {
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2,
+ 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+ break;
+ }
+ case CT_OVERWRITE_ALLOCATION: {
+ size_t len = 1020;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+
+ data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+ break;
+ }
+ case CT_WRITE_AFTER_FREE: {
+ size_t len = 1024;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+
+ kfree(data);
+ schedule();
+ memset(data, 0x78, len);
+ break;
+ }
+ case CT_SOFTLOCKUP:
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+ break;
+ case CT_HARDLOCKUP:
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+ break;
+ case CT_HUNG_TASK:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ break;
+ case CT_NONE:
+ default:
+ break;
+ }
+
+}
+
+static void lkdtm_handler(void)
+{
+ unsigned long flags;
+ bool do_it = false;
+
+ spin_lock_irqsave(&count_lock, flags);
+ count--;
+ printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
+ cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
+
+ if (count == 0) {
+ do_it = true;
+ count = cpoint_count;
+ }
+ spin_unlock_irqrestore(&count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(cptype);
+}
+
+static int lkdtm_register_cpoint(enum cname which)
+{
+ int ret;
+
+ cpoint = CN_INVALID;
+ if (lkdtm.entry != NULL)
+ unregister_jprobe(&lkdtm);
+
+ switch (which) {
+ case CN_DIRECT:
+ lkdtm_do_action(cptype);
+ return 0;
+ case CN_INT_HARDWARE_ENTRY:
+ lkdtm.kp.symbol_name = "do_IRQ";
+ lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
+ break;
+ case CN_INT_HW_IRQ_EN:
+ lkdtm.kp.symbol_name = "handle_IRQ_event";
+ lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
+ break;
+ case CN_INT_TASKLET_ENTRY:
+ lkdtm.kp.symbol_name = "tasklet_action";
+ lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
+ break;
+ case CN_FS_DEVRW:
+ lkdtm.kp.symbol_name = "ll_rw_block";
+ lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
+ break;
+ case CN_MEM_SWAPOUT:
+ lkdtm.kp.symbol_name = "shrink_inactive_list";
+ lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
+ break;
+ case CN_TIMERADD:
+ lkdtm.kp.symbol_name = "hrtimer_start";
+ lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
+ break;
+ case CN_SCSI_DISPATCH_CMD:
+ lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
+ lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
+ break;
+ case CN_IDE_CORE_CP:
+#ifdef CONFIG_IDE
+ lkdtm.kp.symbol_name = "generic_ide_ioctl";
+ lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
+#else
+ printk(KERN_INFO "lkdtm: Crash point not available\n");
+ return -EINVAL;
+#endif
+ break;
+ default:
+ printk(KERN_INFO "lkdtm: Invalid Crash Point\n");
+ return -EINVAL;
+ }
+
+ cpoint = which;
+ if ((ret = register_jprobe(&lkdtm)) < 0) {
+ printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
+ cpoint = CN_INVALID;
+ }
+
+ return ret;
+}
+
+static ssize_t do_register_entry(enum cname which, struct file *f,
+ const char __user *user_buf, size_t count, loff_t *off)
+{
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ cptype = parse_cp_type(buf, count);
+ free_page((unsigned long) buf);
+
+ if (cptype == CT_NONE)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(which);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ char *buf;
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ for (i = 0; i < ARRAY_SIZE(cp_type); i++)
+ n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+
+static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
+}
+
+static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
+}
+
+static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
+}
+
+static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
+}
+
+static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
+}
+
+static ssize_t timeradd_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_TIMERADD, f, buf, count, off);
+}
+
+static ssize_t scsi_dispatch_cmd_entry(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
+}
+
+static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ enum ctype type;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ type = parse_cp_type(buf, count);
+ free_page((unsigned long) buf);
+ if (type == CT_NONE)
+ return -EINVAL;
+
+ printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
+ cp_type_to_str(type));
+ lkdtm_do_action(type);
+ *off += count;
+
+ return count;
+}
+
+struct crash_entry {
+ const char *name;
+ const struct file_operations fops;
+};
+
+static const struct crash_entry crash_entries[] = {
+ {"DIRECT", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = direct_entry} },
+ {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = int_hardware_entry} },
+ {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = int_hw_irq_en} },
+ {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = int_tasklet_entry} },
+ {"FS_DEVRW", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = fs_devrw_entry} },
+ {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = mem_swapout_entry} },
+ {"TIMERADD", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = timeradd_entry} },
+ {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = scsi_dispatch_cmd_entry} },
+ {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
+ .llseek = generic_file_llseek,
+ .open = lkdtm_debugfs_open,
+ .write = ide_core_cp_entry} },
+};
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ int ret = -EINVAL;
+ int n_debugfs_entries = 1; /* Assume only the direct entry */
+ int i;
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+ if (!lkdtm_debugfs_root) {
+ printk(KERN_ERR "lkdtm: creating root dir failed\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_KPROBES
+ n_debugfs_entries = ARRAY_SIZE(crash_entries);
+#endif
+
+ for (i = 0; i < n_debugfs_entries; i++) {
+ const struct crash_entry *cur = &crash_entries[i];
+ struct dentry *de;
+
+ de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
+ NULL, &cur->fops);
+ if (de == NULL) {
+ printk(KERN_ERR "lkdtm: could not create %s\n",
+ cur->name);
+ goto out_err;
+ }
+ }
+
+ if (lkdtm_parse_commandline() == -EINVAL) {
+ printk(KERN_INFO "lkdtm: Invalid command\n");
+ goto out_err;
+ }
+
+ if (cpoint != CN_INVALID && cptype != CT_NONE) {
+ ret = lkdtm_register_cpoint(cpoint);
+ if (ret < 0) {
+ printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
+ cpoint);
+ goto out_err;
+ }
+ printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n",
+ cpoint_name, cpoint_type);
+ } else {
+ printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n");
+ }
+
+ return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ unregister_jprobe(&lkdtm);
+ printk(KERN_INFO "lkdtm: Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/max8997-muic.c b/drivers/misc/max8997-muic.c
new file mode 100644
index 00000000..19591eaa
--- /dev/null
+++ b/drivers/misc/max8997-muic.c
@@ -0,0 +1,495 @@
+/*
+ * max8997-muic.c - MUIC driver for the Maxim MAX8997
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+/* MAX8997-MUIC STATUS1 register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCLOW_SHIFT 5
+#define STATUS1_ADCERR_SHIFT 6
+#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
+
+/* MAX8997-MUIC STATUS2 register */
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DBCHG_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
+#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
+
+/* MAX8997-MUIC STATUS3 register */
+#define STATUS3_OVP_SHIFT 2
+#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX8997-MUIC CONTROL1 register */
+#define COMN1SW_SHIFT 0
+#define COMP2SW_SHIFT 3
+#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
+#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
+
+#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
+#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
+#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
+#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
+
+#define MAX8997_ADC_GROUND 0x00
+#define MAX8997_ADC_MHL 0x01
+#define MAX8997_ADC_JIG_USB_1 0x18
+#define MAX8997_ADC_JIG_USB_2 0x19
+#define MAX8997_ADC_DESKDOCK 0x1a
+#define MAX8997_ADC_JIG_UART 0x1c
+#define MAX8997_ADC_CARDOCK 0x1d
+#define MAX8997_ADC_OPEN 0x1f
+
+struct max8997_muic_irq {
+ unsigned int irq;
+ const char *name;
+};
+
+static struct max8997_muic_irq muic_irqs[] = {
+ { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
+ { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
+ { MAX8997_MUICIRQ_ADC, "muic-ADC" },
+ { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
+ { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
+ { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
+ { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
+ { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
+ { MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+};
+
+struct max8997_muic_info {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct i2c_client *muic;
+ struct max8997_muic_platform_data *muic_pdata;
+
+ int irq;
+ struct work_struct irq_work;
+
+ enum max8997_muic_charger_type pre_charger_type;
+ int pre_adc;
+
+ struct mutex mutex;
+};
+
+static int max8997_muic_handle_usb(struct max8997_muic_info *info,
+ enum max8997_muic_usb_type usb_type, bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ if (usb_type == MAX8997_USB_HOST) {
+ /* switch to USB */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+ }
+
+ if (mdata->usb_callback)
+ mdata->usb_callback(usb_type, attached);
+out:
+ return ret;
+}
+
+static void max8997_muic_handle_mhl(struct max8997_muic_info *info,
+ bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+
+ if (mdata->mhl_callback)
+ mdata->mhl_callback(attached);
+}
+
+static int max8997_muic_handle_dock(struct max8997_muic_info *info,
+ int adc, bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ /* switch to AUDIO */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+
+ switch (adc) {
+ case MAX8997_ADC_DESKDOCK:
+ if (mdata->deskdock_callback)
+ mdata->deskdock_callback(attached);
+ break;
+ case MAX8997_ADC_CARDOCK:
+ if (mdata->cardock_callback)
+ mdata->cardock_callback(attached);
+ break;
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
+ bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ /* switch to UART */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+
+ if (mdata->uart_callback)
+ mdata->uart_callback(attached);
+out:
+ return ret;
+}
+
+static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
+{
+ int ret = 0;
+
+ switch (info->pre_adc) {
+ case MAX8997_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
+ break;
+ case MAX8997_ADC_MHL:
+ max8997_muic_handle_mhl(info, false);
+ break;
+ case MAX8997_ADC_JIG_USB_1:
+ case MAX8997_ADC_JIG_USB_2:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
+ break;
+ case MAX8997_ADC_DESKDOCK:
+ case MAX8997_ADC_CARDOCK:
+ ret = max8997_muic_handle_dock(info, info->pre_adc, false);
+ break;
+ case MAX8997_ADC_JIG_UART:
+ ret = max8997_muic_handle_jig_uart(info, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
+{
+ int ret = 0;
+
+ switch (adc) {
+ case MAX8997_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
+ break;
+ case MAX8997_ADC_MHL:
+ max8997_muic_handle_mhl(info, true);
+ break;
+ case MAX8997_ADC_JIG_USB_1:
+ case MAX8997_ADC_JIG_USB_2:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
+ break;
+ case MAX8997_ADC_DESKDOCK:
+ case MAX8997_ADC_CARDOCK:
+ ret = max8997_muic_handle_dock(info, adc, true);
+ break;
+ case MAX8997_ADC_JIG_UART:
+ ret = max8997_muic_handle_jig_uart(info, true);
+ break;
+ case MAX8997_ADC_OPEN:
+ ret = max8997_muic_handle_adc_detach(info);
+ break;
+ default:
+ break;
+ }
+
+ info->pre_adc = adc;
+
+ return ret;
+}
+
+static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
+ enum max8997_muic_charger_type charger_type)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ u8 adc;
+ int ret;
+
+ ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ goto out;
+ }
+
+ switch (charger_type) {
+ case MAX8997_CHARGER_TYPE_NONE:
+ if (mdata->charger_callback)
+ mdata->charger_callback(false, charger_type);
+ if (info->pre_charger_type == MAX8997_CHARGER_TYPE_USB) {
+ max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, false);
+ }
+ break;
+ case MAX8997_CHARGER_TYPE_USB:
+ if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+ max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, true);
+ }
+ if (mdata->charger_callback)
+ mdata->charger_callback(true, charger_type);
+ break;
+ case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
+ case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX8997_CHARGER_TYPE_500MA:
+ case MAX8997_CHARGER_TYPE_1A:
+ if (mdata->charger_callback)
+ mdata->charger_callback(true, charger_type);
+ break;
+ default:
+ break;
+ }
+
+ info->pre_charger_type = charger_type;
+out:
+ return ret;
+}
+
+static void max8997_muic_irq_work(struct work_struct *work)
+{
+ struct max8997_muic_info *info = container_of(work,
+ struct max8997_muic_info, irq_work);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(info->iodev->dev);
+ u8 status[3];
+ u8 adc, chg_type;
+
+ int irq_type = info->irq - pdata->irq_base;
+ int ret;
+
+ mutex_lock(&info->mutex);
+
+ ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
+ 3, status);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
+ status[0], status[1]);
+
+ switch (irq_type) {
+ case MAX8997_MUICIRQ_ADC:
+ adc = status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ max8997_muic_handle_adc(info, adc);
+ break;
+ case MAX8997_MUICIRQ_ChgTyp:
+ chg_type = status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ max8997_muic_handle_charger_type(info, chg_type);
+ break;
+ default:
+ dev_info(info->dev, "misc interrupt: %s occurred\n",
+ muic_irqs[irq_type].name);
+ break;
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return;
+}
+
+static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
+{
+ struct max8997_muic_info *info = data;
+
+ dev_dbg(info->dev, "irq:%d\n", irq);
+ info->irq = irq;
+
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+{
+ int ret;
+ u8 status[2], adc, chg_type;
+
+ ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
+ 2, status);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ return;
+ }
+
+ dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
+ status[0], status[1]);
+
+ adc = status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ chg_type = status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ max8997_muic_handle_adc(info, adc);
+ max8997_muic_handle_charger_type(info, chg_type);
+}
+
+static void max8997_initialize_device(struct max8997_muic_info *info)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int i;
+
+ for (i = 0; i < mdata->num_init_data; i++) {
+ max8997_write_reg(info->muic, mdata->init_data[i].addr,
+ mdata->init_data[i].data);
+ }
+}
+
+static int __devinit max8997_muic_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8997_muic_info *info;
+ int ret, i;
+
+ info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ if (!pdata->muic_pdata) {
+ dev_err(&pdev->dev, "failed to get platform_data\n");
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+ info->muic_pdata = pdata->muic_pdata;
+
+ info->dev = &pdev->dev;
+ info->iodev = iodev;
+ info->muic = iodev->muic;
+
+ platform_set_drvdata(pdev, info);
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, max8997_muic_irq_work);
+
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
+ struct max8997_muic_irq *muic_irq = &muic_irqs[i];
+
+ ret = request_threaded_irq(pdata->irq_base + muic_irq->irq,
+ NULL, max8997_muic_irq_handler,
+ 0, muic_irq->name,
+ info);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request IRQ %d (error %d)\n",
+ muic_irq->irq, ret);
+
+ for (i = i - 1; i >= 0; i--)
+ free_irq(pdata->irq_base + muic_irqs[i].irq, info);
+
+ goto err_irq;
+ }
+ }
+
+ /* Initialize registers according to platform data */
+ max8997_initialize_device(info);
+
+ /* Initial device detection */
+ max8997_muic_detect_dev(info);
+
+ return ret;
+
+err_irq:
+err_pdata:
+ kfree(info);
+err_kfree:
+ return ret;
+}
+
+static int __devexit max8997_muic_remove(struct platform_device *pdev)
+{
+ struct max8997_muic_info *info = platform_get_drvdata(pdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(info->iodev->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
+ free_irq(pdata->irq_base + muic_irqs[i].irq, info);
+ cancel_work_sync(&info->irq_work);
+
+ kfree(info);
+
+ return 0;
+}
+
+static struct platform_driver max8997_muic_driver = {
+ .driver = {
+ .name = "max8997-muic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_muic_probe,
+ .remove = __devexit_p(max8997_muic_remove),
+};
+
+module_platform_driver(max8997_muic_driver);
+
+MODULE_DESCRIPTION("Maxim MAX8997 MUIC driver");
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mediatek/Kconfig b/drivers/misc/mediatek/Kconfig
new file mode 100755
index 00000000..d882c767
--- /dev/null
+++ b/drivers/misc/mediatek/Kconfig
@@ -0,0 +1,9 @@
+menuconfig MTK_WIRELESS_SOLUTION
+ bool "MTK wireless chip configuration"
+ help
+ Enable/disable and configure the MTK wireless solution.
+
+if MTK_WIRELESS_SOLUTION
+source "drivers/misc/mediatek/combo_mt66xx/Kconfig"
+source "drivers/misc/mediatek/mt6622/Kconfig"
+endif
diff --git a/drivers/misc/mediatek/Makefile b/drivers/misc/mediatek/Makefile
new file mode 100755
index 00000000..d308cb1c
--- /dev/null
+++ b/drivers/misc/mediatek/Makefile
@@ -0,0 +1,3 @@
+obj-y += combo_mt66xx/
+#obj-$(CONFIG_MTK_MT6622) += mt6622/
+
diff --git a/drivers/misc/mediatek/combo_mt66xx/Kconfig b/drivers/misc/mediatek/combo_mt66xx/Kconfig
new file mode 100755
index 00000000..148f0505
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/Kconfig
@@ -0,0 +1,35 @@
+menuconfig MTK_COMBO_MT66XX
+ bool "MediaTek combo_mt66xx Config"
+ help
+ Configure the MTK combo chips MT6628 and MT6620.
+
+if MTK_COMBO_MT66XX
+
+config MTK_COMBO_WMT
+ tristate "MediaTek Combo Chip wireless management tool"
+ default y
+ help
+ MTK wireless management tool for WIFI/BT/GPS/FM
+
+config MTK_COMBO_BT
+ tristate "MediaTek Combo Chip BT driver"
+ default y
+ #depends on MTK_COMBO_WMT
+ help
+ MTK BT /dev/stpbt driver for Bluedroid (mtk_stp_bt.ko)
+
+config MTK_COMBO_FM
+ tristate "MediaTek Combo Chip FM driver"
+ default y
+ #depends on MTK_COMBO_WMT
+ help
+ MTK FM /dev/fm driver
+
+config MTK_COMBO_GPS
+ tristate "MediaTek GPS Support"
+ default y
+ #depends on MTK_COMBO_WMT
+ help
+ MTK GPS /dev/gps driver (mtk_gps.ko)
+
+endif
diff --git a/drivers/misc/mediatek/combo_mt66xx/Makefile b/drivers/misc/mediatek/combo_mt66xx/Makefile
new file mode 100755
index 00000000..3ec2afb7
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/Makefile
@@ -0,0 +1,39 @@
+# Copyright Statement:
+#
+# This software/firmware and related documentation ("MediaTek Software") are
+# protected under relevant copyright laws. The information contained herein
+# is confidential and proprietary to MediaTek Inc. and/or its licensors.
+# Without the prior written permission of MediaTek inc. and/or its licensors,
+# any reproduction, modification, use or disclosure of MediaTek Software,
+# and information contained herein, in whole or in part, shall be strictly prohibited.
+#
+# MediaTek Inc. (C) 2010. All rights reserved.
+#
+# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+#
+# The following software/firmware and/or related documentation ("MediaTek Software")
+# have been modified by MediaTek Inc. All revisions are subject to any receiver's
+# applicable license agreements with MediaTek Inc.
+
+obj-y += wmt/
+obj-y += fm/
+obj-y += bt/
+obj-y += gps/
+
diff --git a/drivers/misc/mediatek/combo_mt66xx/bt/Makefile b/drivers/misc/mediatek/combo_mt66xx/bt/Makefile
new file mode 100755
index 00000000..41612ef7
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/bt/Makefile
@@ -0,0 +1,41 @@
+# Copyright Statement:
+#
+# This software/firmware and related documentation ("MediaTek Software") are
+# protected under relevant copyright laws. The information contained herein
+# is confidential and proprietary to MediaTek Inc. and/or its licensors.
+# Without the prior written permission of MediaTek inc. and/or its licensors,
+# any reproduction, modification, use or disclosure of MediaTek Software,
+# and information contained herein, in whole or in part, shall be strictly prohibited.
+#
+# MediaTek Inc. (C) 2010. All rights reserved.
+#
+# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+#
+# The following software/firmware and/or related documentation ("MediaTek Software")
+# have been modified by MediaTek Inc. All revisions are subject to any receiver's
+# applicable license agreements with MediaTek Inc.
+
+
+#
+# Makefile for the Linux Bluetooth HCI device drivers.
+#
+
+obj-y += uhid/
+
diff --git a/drivers/misc/mediatek/combo_mt66xx/bt/uhid/Makefile b/drivers/misc/mediatek/combo_mt66xx/bt/uhid/Makefile
new file mode 100755
index 00000000..f2e00c9d
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/bt/uhid/Makefile
@@ -0,0 +1,46 @@
+# Copyright Statement:
+#
+# This software/firmware and related documentation ("MediaTek Software") are
+# protected under relevant copyright laws. The information contained herein
+# is confidential and proprietary to MediaTek Inc. and/or its licensors.
+# Without the prior written permission of MediaTek inc. and/or its licensors,
+# any reproduction, modification, use or disclosure of MediaTek Software,
+# and information contained herein, in whole or in part, shall be strictly prohibited.
+
+# MediaTek Inc. (C) 2010. All rights reserved.
+#
+# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+#
+# The following software/firmware and/or related documentation ("MediaTek Software")
+# have been modified by MediaTek Inc. All revisions are subject to any receiver's
+# applicable license agreements with MediaTek Inc.
+
+
+#
+# Makefile for the Bluetooth UHID driver.
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+obj-y += bt_uhid.o
+
+# EOF
diff --git a/drivers/misc/mediatek/combo_mt66xx/fm/Makefile b/drivers/misc/mediatek/combo_mt66xx/fm/Makefile
new file mode 100755
index 00000000..11c3ca61
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/fm/Makefile
@@ -0,0 +1,72 @@
+# Makefile generated by Mediatek
+CONFIG_MTK_COMBO_FM:=m
+#mt6628 fm
+ccflags-y += -I$(src)/inc \
+ -I$(src)/mt6628/inc \
+ -I$(src)/../wmt/include \
+ -I$(src)/../wmt/linux/include \
+ -I$(src)/cust/mt6628 \
+ -I$(src)/private/inc \
+ -DMT6628_FM
+
+obj-$(CONFIG_MTK_COMBO_FM) += mtk_fm_drv.o
+ mtk_fm_drv-objs += core/fm_module.o \
+ core/fm_main.o \
+ core/fm_config.o \
+ core/fm_rds_parser.o \
+ core/fm_patch.o \
+ core/fm_utils.o \
+ core/fm_link.o \
+ core/fm_eint.o \
+ mt6628/pub/mt6628_fm_lib.o \
+ mt6628/pub/mt6628_fm_rds.o \
+ mt6628/pub/mt6628_fm_cmd.o \
+ mt6628/pub/mt6628_fm_config.o
+#mt6620 fm start
+ ccflags-y += -I$(src)/inc \
+ -I$(src)/mt6620/inc \
+ -I$(src)/../wmt/include \
+ -I$(src)/../wmt/linux/include \
+ -I$(src)/cust/mt6620 \
+ -I$(src)/private/inc \
+ -DMT6620_FM
+
+ obj-$(CONFIG_MTK_COMBO_FM) += mtk_fm_drv.o
+ mtk_fm_drv-objs += core/fm_module.o \
+ core/fm_main.o \
+ core/fm_config.o \
+ core/fm_rds_parser.o \
+ core/fm_patch.o \
+ core/fm_utils.o \
+ core/fm_link.o \
+ core/fm_eint.o \
+ mt6620/pub/mt6620_fm_lib.o \
+ mt6620/pub/mt6620_fm_rds.o \
+ mt6620/pub/mt6620_fm_cmd.o \
+ mt6620/pub/mt6620_fm_config.o
+
+# MT6626 FM driver
+ifeq ($(CONFIG_MTK_COMBO_CHIP_MT6626), y)
+FM_CHIP = mt6626
+FM_CHIP_PATH = $(FM_CHIP)/pub/$(FM_CHIP)
+ccflags-y := -I$(src)/inc \
+ -I$(src)/$(FM_CHIP)/inc \
+ -I$(src)/cust/$(FM_CHIP) \
+ -I$(src)/private/inc \
+ -DMT6626_FM
+
+obj-$(CONFIG_MTK_COMBO_FM) += mtk_fm_drv.o
+mtk_fm_drv-objs := core/fm_module.o \
+ core/fm_main.o \
+ core/fm_config.o \
+ core/fm_patch.o \
+ core/fm_rds_parser.o \
+ core/fm_utils.o \
+ core/fm_link.o \
+ $(FM_CHIP_PATH)_fm_lib.o \
+ $(FM_CHIP_PATH)_fm_rds.o \
+ $(FM_CHIP_PATH)_fm_link.o \
+ $(FM_CHIP_PATH)_fm_eint.o
+endif
+
+
diff --git a/drivers/misc/mediatek/combo_mt66xx/fm/private/Makefile b/drivers/misc/mediatek/combo_mt66xx/fm/private/Makefile
new file mode 100755
index 00000000..1a8e7960
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/fm/private/Makefile
@@ -0,0 +1,44 @@
+# Makefile generated by Mediatek
+
+# fm support
+ifeq ($(MTK_FM_SUPPORT), yes)
+
+ifeq ($(CUSTOM_KERNEL_FM), mt6628)
+PRIV_CHIP = mt6628
+PRIV_SRC_PATH = private/src/$(PRIV_CHIP)
+PRIV_CONFIG = $(CONFIG_MTK_FM)
+
+ccflags-y := \
+ -I$(src)/inc \
+ -I$(src)/$(PRIV_SRC_PATH) \
+ -I$(src)/../inc \
+ -I$(src)/../cust/$(PRIV_CHIP) \
+ -I$(src)/../../combo/common_mt6628/include \
+ -I$(src)/../../combo/common_mt6628/linux/include
+
+
+obj-$(PRIV_CONFIG) += mtk_fm_priv.o
+mtk_fm_priv-objs := \
+ src/$(PRIV_CHIP)/$(PRIV_CHIP)_fm_private.o
+endif
+
+ifeq ($(CUSTOM_KERNEL_FM), mt6620)
+PRIV_CHIP = mt6620
+PRIV_SRC_PATH = private/src/$(PRIV_CHIP)
+PRIV_CONFIG = $(CONFIG_MTK_FM)
+
+ccflags-y := \
+ -I$(src)/inc \
+ -I$(src)/$(PRIV_SRC_PATH) \
+ -I$(src)/../inc \
+ -I$(src)/../cust/$(PRIV_CHIP) \
+ -I$(src)/../../combo/common/include \
+ -I$(src)/../../combo/common/linux/include
+
+
+obj-$(PRIV_CONFIG) += mtk_fm_priv.o
+mtk_fm_priv-objs := \
+ src/$(PRIV_CHIP)/$(PRIV_CHIP)_fm_private.o
+endif
+
+endif
diff --git a/drivers/misc/mediatek/combo_mt66xx/gps/Makefile b/drivers/misc/mediatek/combo_mt66xx/gps/Makefile
new file mode 100755
index 00000000..19d0d43a
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/gps/Makefile
@@ -0,0 +1,50 @@
+# Copyright Statement:
+#
+# This software/firmware and related documentation ("MediaTek Software") are
+# protected under relevant copyright laws. The information contained herein
+# is confidential and proprietary to MediaTek Inc. and/or its licensors.
+# Without the prior written permission of MediaTek inc. and/or its licensors,
+# any reproduction, modification, use or disclosure of MediaTek Software,
+# and information contained herein, in whole or in part, shall be strictly prohibited.
+
+# MediaTek Inc. (C) 2010. All rights reserved.
+#
+# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+#
+# The following software/firmware and/or related documentation ("MediaTek Software")
+# have been modified by MediaTek Inc. All revisions are subject to any receiver's
+# applicable license agreements with MediaTek Inc.
+
+
+# drivers/barcelona/gps/Makefile
+#
+# Makefile for the Barcelona GPS driver.
+#
+# Copyright (C) 2004,2005 TomTom BV <http://www.tomtom.com/>
+# Author: Dimitry Andric <dimitry.andric@tomtom.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+obj-y += gps.o
+
+
+# EOF
diff --git a/drivers/misc/mediatek/combo_mt66xx/wmt/Makefile b/drivers/misc/mediatek/combo_mt66xx/wmt/Makefile
new file mode 100755
index 00000000..64e0b362
--- /dev/null
+++ b/drivers/misc/mediatek/combo_mt66xx/wmt/Makefile
@@ -0,0 +1,78 @@
+MTK_WCN_CMB_SDIO_EINT := y
+CONFIG_MTK_COMBO_WMT:=m
+CONFIG_MTK_COMBO_BT:=m
+CONFIG_MTK_COMBO_GPS:=m
+
+ccflags-y += -I$(src)/linux/include -I$(src)/core/include -I$(src)/include -I$(src)/
+ccflags-y += -DWMT_PLAT_APEX=1
+ccflags-y += -DWMT_PLAT_ALPS=0
+ccflags-y += -DWMT_UART_RX_MODE_WORK=1 # 1. work thread 0. tasklet
+ccflags-y += -DREMOVE_MK_NODE=1
+ifeq ($(strip $(MTK_WCN_CMB_SDIO_EINT)), y)
+ccflags-y += -DMTK_CMB_SDIO_EINT
+endif
+
+#stub to build-in
+obj-y += platform/vendor/mtk_wcn_cmb_stub.o
+
+#Common SDIO driver for WIFI and STP(mtk_hif_sdio.ko)
+obj-$(CONFIG_MTK_COMBO_WMT) += mtk_hif_sdio.o
+mtk_hif_sdio-objs := linux/hif_sdio.o \
+ linux/hif_sdio_chrdev.o \
+ linux/osal.o
+
+ifeq ($(MTK_WCN_CMB_SDIO_EINT), y)
+mtk_hif_sdio-objs += linux/hif_sdio_eint.o
+endif
+
+# WMT/STP DRIVER(mtk_stp_wmt.ko)
+obj-$(CONFIG_MTK_COMBO_WMT) += mtk_stp_wmt.o
+mtk_stp_wmt-objs := core/wmt_core.o \
+ core/wmt_ctrl.o \
+ core/wmt_func.o \
+ core/wmt_ic_6620.o \
+ core/wmt_lib.o \
+ core/wmt_conf.o \
+ core/wmt_dbg.o \
+ core/wmt_exp.o \
+ core/wmt_ic_6628.o \
+ linux/wmt_dev.o \
+ linux/wmt_tm.o \
+ platform/vendor/wmt_plat.o \
+ platform/vendor/wmt_plat_stub.o \
+ platform/vendor/mtk_wcn_cmb_hw.o \
+ core/stp_exp.o \
+ core/stp_core.o \
+ core/psm_core.o \
+ core/btm_core.o \
+ linux/stp_dbg.o
+ifeq ($(CONFIG_MTK_COMBO_WMT), m)
+mtk_stp_wmt-objs += linux/osal.o
+endif
+
+#WMT/STP use UART interface(mtk_stp_uart.ko)
+obj-$(CONFIG_MTK_COMBO_WMT) += mtk_stp_uart.o
+mtk_stp_uart-objs := linux/stp_uart.o
+
+#WMT/STP use SDIO interface(mtk_stp_sdio.ko)
+obj-$(CONFIG_MTK_COMBO_WMT) += mtk_stp_sdio.o
+mtk_stp_sdio-objs := linux/stp_sdio.o
+
+ifeq ($(CONFIG_MTK_COMBO_WMT), m)
+mtk_stp_sdio-objs += linux/osal.o
+endif
+
+#BT character device driver
+obj-$(CONFIG_MTK_COMBO_BT) += mtk_stp_bt.o
+mtk_stp_bt-objs := linux/stp_chrdev_bt.o
+
+#GPS character device driver
+obj-$(CONFIG_MTK_COMBO_GPS) += mtk_stp_gps.o
+mtk_stp_gps-objs := linux/stp_chrdev_gps.o
+
+#WIFI character device driver
+obj-$(CONFIG_MTK_COMBO_WIFI) += mtk_wmt_wifi.o
+mtk_wmt_wifi-objs := linux/wmt_chrdev_wifi.o
+
+#FM don't need such character device
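+
+#Example insertion order (module names come from the obj-m targets above;
+#the ordering is an assumption based on the dependencies between them):
+# insmod mtk_hif_sdio.ko
+# insmod mtk_stp_wmt.ko
+# insmod mtk_stp_uart.ko (or mtk_stp_sdio.ko, depending on the STP transport)
+# insmod mtk_stp_bt.ko / mtk_stp_gps.ko / mtk_wmt_wifi.ko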
+
diff --git a/drivers/misc/mediatek/dummy.c b/drivers/misc/mediatek/dummy.c
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/drivers/misc/mediatek/dummy.c
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
new file mode 100644
index 00000000..10fc4785
--- /dev/null
+++ b/drivers/misc/pch_phub.c
@@ -0,0 +1,914 @@
+/*
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/if_ether.h>
+#include <linux/ctype.h>
+#include <linux/dmi.h>
+
+#define PHUB_STATUS 0x00 /* Status Register offset */
+#define PHUB_CONTROL 0x04 /* Control Register offset */
+#define PHUB_TIMEOUT 0x05 /* Time out value for Status Register */
+#define PCH_PHUB_ROM_WRITE_ENABLE 0x01 /* Enable ROM writing */
+#define PCH_PHUB_ROM_WRITE_DISABLE 0x00 /* Disable ROM writing */
+#define PCH_PHUB_MAC_START_ADDR_EG20T 0x14 /* MAC data area start address
+ offset */
+#define PCH_PHUB_MAC_START_ADDR_ML7223 0x20C /* MAC data area start address
+ offset */
+#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
+ (Intel EG20T PCH)*/
+#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
+ offset(LAPIS Semicon ML7213)
+ */
+#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
+ offset(LAPIS Semicon ML7223)
+ */
+
+/* MAX number of INT_REDUCE_CONTROL registers */
+#define MAX_NUM_INT_REDUCE_CONTROL_REG 128
+#define PCI_DEVICE_ID_PCH1_PHUB 0x8801
+#define PCH_MINOR_NOS 1
+#define CLKCFG_CAN_50MHZ 0x12000000
+#define CLKCFG_CANCLK_MASK 0xFF000000
+#define CLKCFG_UART_MASK 0xFFFFFF
+
+/* CM-iTC */
+#define CLKCFG_UART_48MHZ (1 << 16)
+#define CLKCFG_BAUDDIV (2 << 20)
+#define CLKCFG_PLL2VCO (8 << 9)
+#define CLKCFG_UARTCLKSEL (1 << 18)
+
+/* Macros for ML7213 */
+#define PCI_VENDOR_ID_ROHM 0x10db
+#define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A
+
+/* Macros for ML7223 */
+#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
+#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
+
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
+
+/* SROM ACCESS Macro */
+#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
+
+/* Registers address offset */
+#define PCH_PHUB_ID_REG 0x0000
+#define PCH_PHUB_QUEUE_PRI_VAL_REG 0x0004
+#define PCH_PHUB_RC_QUEUE_MAXSIZE_REG 0x0008
+#define PCH_PHUB_BRI_QUEUE_MAXSIZE_REG 0x000C
+#define PCH_PHUB_COMP_RESP_TIMEOUT_REG 0x0010
+#define PCH_PHUB_BUS_SLAVE_CONTROL_REG 0x0014
+#define PCH_PHUB_DEADLOCK_AVOID_TYPE_REG 0x0018
+#define PCH_PHUB_INTPIN_REG_WPERMIT_REG0 0x0020
+#define PCH_PHUB_INTPIN_REG_WPERMIT_REG1 0x0024
+#define PCH_PHUB_INTPIN_REG_WPERMIT_REG2 0x0028
+#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C
+#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040
+#define CLKCFG_REG_OFFSET 0x500
+#define FUNCSEL_REG_OFFSET 0x508
+
+#define PCH_PHUB_OROM_SIZE 15360
+
+/**
+ * struct pch_phub_reg - PHUB register structure
+ * @phub_id_reg: PHUB_ID register val
+ * @q_pri_val_reg: QUEUE_PRI_VAL register val
+ * @rc_q_maxsize_reg: RC_QUEUE_MAXSIZE register val
+ * @bri_q_maxsize_reg: BRI_QUEUE_MAXSIZE register val
+ * @comp_resp_timeout_reg: COMP_RESP_TIMEOUT register val
+ * @bus_slave_control_reg: BUS_SLAVE_CONTROL_REG register val
+ * @deadlock_avoid_type_reg: DEADLOCK_AVOID_TYPE register val
+ * @intpin_reg_wpermit_reg0: INTPIN_REG_WPERMIT register 0 val
+ * @intpin_reg_wpermit_reg1: INTPIN_REG_WPERMIT register 1 val
+ * @intpin_reg_wpermit_reg2: INTPIN_REG_WPERMIT register 2 val
+ * @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val
+ * @int_reduce_control_reg: INT_REDUCE_CONTROL registers val
+ * @clkcfg_reg: CLK CFG register val
+ * @funcsel_reg: Function select register value
+ * @pch_phub_base_address: Register base address
+ * @pch_phub_extrom_base_address: external rom base address
+ * @pch_mac_start_address: MAC address area start address
+ * @pch_opt_rom_start_address: Option ROM start address
+ * @ioh_type: Save IOH type
+ * @pdev: pointer to pci device struct
+ */
+struct pch_phub_reg {
+ u32 phub_id_reg;
+ u32 q_pri_val_reg;
+ u32 rc_q_maxsize_reg;
+ u32 bri_q_maxsize_reg;
+ u32 comp_resp_timeout_reg;
+ u32 bus_slave_control_reg;
+ u32 deadlock_avoid_type_reg;
+ u32 intpin_reg_wpermit_reg0;
+ u32 intpin_reg_wpermit_reg1;
+ u32 intpin_reg_wpermit_reg2;
+ u32 intpin_reg_wpermit_reg3;
+ u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
+ u32 clkcfg_reg;
+ u32 funcsel_reg;
+ void __iomem *pch_phub_base_address;
+ void __iomem *pch_phub_extrom_base_address;
+ u32 pch_mac_start_address;
+ u32 pch_opt_rom_start_address;
+ int ioh_type;
+ struct pci_dev *pdev;
+};
+
+/* SROM SPEC for MAC address assignment offset */
+static const int pch_phub_mac_offset[ETH_ALEN] = {0x3, 0x2, 0x1, 0x0, 0xb, 0xa};
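+/*
+ * Illustration of the table above: mac[0] is read from SROM byte
+ * (pch_mac_start_address + 0x3), mac[1] from +0x2, mac[2] from +0x1,
+ * mac[3] from +0x0, and mac[4]/mac[5] from +0xb/+0xa, i.e. the MAC bytes
+ * sit in reversed order within each 32-bit SROM word.
+ */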
+
+static DEFINE_MUTEX(pch_phub_mutex);
+
+/**
+ * pch_phub_read_modify_write_reg() - read, modify and write a register
+ * @chip: pointer to the PHUB register structure
+ * @reg_addr_offset: Register offset address value.
+ * @data: Writing value.
+ * @mask: Mask value.
+ */
+static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
+ unsigned int reg_addr_offset,
+ unsigned int data, unsigned int mask)
+{
+ void __iomem *reg_addr = chip->pch_phub_base_address + reg_addr_offset;
+ iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
+}
+
+/* pch_phub_save_reg_conf - saves register configuration */
+static void pch_phub_save_reg_conf(struct pci_dev *pdev)
+{
+ unsigned int i;
+ struct pch_phub_reg *chip = pci_get_drvdata(pdev);
+
+ void __iomem *p = chip->pch_phub_base_address;
+
+ chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG);
+ chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG);
+ chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
+ chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
+ chip->comp_resp_timeout_reg =
+ ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
+ chip->bus_slave_control_reg =
+ ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
+ chip->deadlock_avoid_type_reg =
+ ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
+ chip->intpin_reg_wpermit_reg0 =
+ ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
+ chip->intpin_reg_wpermit_reg1 =
+ ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
+ chip->intpin_reg_wpermit_reg2 =
+ ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
+ chip->intpin_reg_wpermit_reg3 =
+ ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
+ dev_dbg(&pdev->dev, "%s : "
+ "chip->phub_id_reg=%x, "
+ "chip->q_pri_val_reg=%x, "
+ "chip->rc_q_maxsize_reg=%x, "
+ "chip->bri_q_maxsize_reg=%x, "
+ "chip->comp_resp_timeout_reg=%x, "
+ "chip->bus_slave_control_reg=%x, "
+ "chip->deadlock_avoid_type_reg=%x, "
+ "chip->intpin_reg_wpermit_reg0=%x, "
+ "chip->intpin_reg_wpermit_reg1=%x, "
+ "chip->intpin_reg_wpermit_reg2=%x, "
+ "chip->intpin_reg_wpermit_reg3=%x\n", __func__,
+ chip->phub_id_reg,
+ chip->q_pri_val_reg,
+ chip->rc_q_maxsize_reg,
+ chip->bri_q_maxsize_reg,
+ chip->comp_resp_timeout_reg,
+ chip->bus_slave_control_reg,
+ chip->deadlock_avoid_type_reg,
+ chip->intpin_reg_wpermit_reg0,
+ chip->intpin_reg_wpermit_reg1,
+ chip->intpin_reg_wpermit_reg2,
+ chip->intpin_reg_wpermit_reg3);
+ for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
+ chip->int_reduce_control_reg[i] =
+ ioread32(p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
+ dev_dbg(&pdev->dev, "%s : "
+ "chip->int_reduce_control_reg[%d]=%x\n",
+ __func__, i, chip->int_reduce_control_reg[i]);
+ }
+ chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
+ if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
+ chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET);
+}
+
+/* pch_phub_restore_reg_conf - restore register configuration */
+static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
+{
+ unsigned int i;
+ struct pch_phub_reg *chip = pci_get_drvdata(pdev);
+ void __iomem *p;
+ p = chip->pch_phub_base_address;
+
+ iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG);
+ iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG);
+ iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
+ iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
+ iowrite32(chip->comp_resp_timeout_reg,
+ p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
+ iowrite32(chip->bus_slave_control_reg,
+ p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
+ iowrite32(chip->deadlock_avoid_type_reg,
+ p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
+ iowrite32(chip->intpin_reg_wpermit_reg0,
+ p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
+ iowrite32(chip->intpin_reg_wpermit_reg1,
+ p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
+ iowrite32(chip->intpin_reg_wpermit_reg2,
+ p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
+ iowrite32(chip->intpin_reg_wpermit_reg3,
+ p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
+ dev_dbg(&pdev->dev, "%s : "
+ "chip->phub_id_reg=%x, "
+ "chip->q_pri_val_reg=%x, "
+ "chip->rc_q_maxsize_reg=%x, "
+ "chip->bri_q_maxsize_reg=%x, "
+ "chip->comp_resp_timeout_reg=%x, "
+ "chip->bus_slave_control_reg=%x, "
+ "chip->deadlock_avoid_type_reg=%x, "
+ "chip->intpin_reg_wpermit_reg0=%x, "
+ "chip->intpin_reg_wpermit_reg1=%x, "
+ "chip->intpin_reg_wpermit_reg2=%x, "
+ "chip->intpin_reg_wpermit_reg3=%x\n", __func__,
+ chip->phub_id_reg,
+ chip->q_pri_val_reg,
+ chip->rc_q_maxsize_reg,
+ chip->bri_q_maxsize_reg,
+ chip->comp_resp_timeout_reg,
+ chip->bus_slave_control_reg,
+ chip->deadlock_avoid_type_reg,
+ chip->intpin_reg_wpermit_reg0,
+ chip->intpin_reg_wpermit_reg1,
+ chip->intpin_reg_wpermit_reg2,
+ chip->intpin_reg_wpermit_reg3);
+ for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
+ iowrite32(chip->int_reduce_control_reg[i],
+ p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
+ dev_dbg(&pdev->dev, "%s : "
+ "chip->int_reduce_control_reg[%d]=%x\n",
+ __func__, i, chip->int_reduce_control_reg[i]);
+ }
+
+ iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
+ if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
+ iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
+}
+
+/**
+ * pch_phub_read_serial_rom() - Reading Serial ROM
+ * @chip: pointer to the PHUB register structure
+ * @offset_address: Serial ROM offset address to read.
+ * @data: Read buffer for specified Serial ROM value.
+ */
+static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
+ unsigned int offset_address, u8 *data)
+{
+ void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
+ offset_address;
+
+ *data = ioread8(mem_addr);
+}
+
+/**
+ * pch_phub_write_serial_rom() - Writing Serial ROM
+ * @chip: pointer to the PHUB register structure
+ * @offset_address: Serial ROM offset address.
+ * @data: Serial ROM value to write.
+ */
+static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
+ unsigned int offset_address, u8 data)
+{
+ void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
+ (offset_address & PCH_WORD_ADDR_MASK);
+ int i;
+ unsigned int word_data;
+ unsigned int pos;
+ unsigned int mask;
+ pos = (offset_address % 4) * 8;
+ mask = ~(0xFF << pos);
+
+ iowrite32(PCH_PHUB_ROM_WRITE_ENABLE,
+ chip->pch_phub_extrom_base_address + PHUB_CONTROL);
+
+ word_data = ioread32(mem_addr);
+ iowrite32((word_data & mask) | (u32)data << pos, mem_addr);
+
+ i = 0;
+ while (ioread8(chip->pch_phub_extrom_base_address +
+ PHUB_STATUS) != 0x00) {
+ msleep(1);
+ if (i == PHUB_TIMEOUT)
+ return -ETIMEDOUT;
+ i++;
+ }
+
+ iowrite32(PCH_PHUB_ROM_WRITE_DISABLE,
+ chip->pch_phub_extrom_base_address + PHUB_CONTROL);
+
+ return 0;
+}
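+
+/*
+ * Worked example of the byte-lane math above: offset_address = 0x0b maps
+ * to the word at 0x08 (PCH_WORD_ADDR_MASK), pos = (0x0b % 4) * 8 = 24 and
+ * mask = ~(0xFF << 24) = 0x00ffffff, so the read-modify-write sequence
+ * replaces only the most significant byte of that word.
+ */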
+
+/**
+ * pch_phub_read_serial_rom_val() - Read Serial ROM value
+ * @chip: pointer to the PHUB register structure
+ * @offset_address: Serial ROM address offset value.
+ * @data: Serial ROM value to read.
+ */
+static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
+ unsigned int offset_address, u8 *data)
+{
+ unsigned int mem_addr;
+
+ mem_addr = chip->pch_mac_start_address +
+ pch_phub_mac_offset[offset_address];
+
+ pch_phub_read_serial_rom(chip, mem_addr, data);
+}
+
+/**
+ * pch_phub_write_serial_rom_val() - writing Serial ROM value
+ * @chip: pointer to the PHUB register structure
+ * @offset_address: Serial ROM address offset value.
+ * @data: Serial ROM value.
+ */
+static int pch_phub_write_serial_rom_val(struct pch_phub_reg *chip,
+ unsigned int offset_address, u8 data)
+{
+ int retval;
+ unsigned int mem_addr;
+
+ mem_addr = chip->pch_mac_start_address +
+ pch_phub_mac_offset[offset_address];
+
+ retval = pch_phub_write_serial_rom(chip, mem_addr, data);
+
+ return retval;
+}
+
+/* pch_phub_gbe_serial_rom_conf - makes Serial ROM header format configuration
+ * for Gigabit Ethernet MAC address
+ */
+static int pch_phub_gbe_serial_rom_conf(struct pch_phub_reg *chip)
+{
+ int retval;
+
+ retval = pch_phub_write_serial_rom(chip, 0x0b, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x0a, 0x10);
+ retval |= pch_phub_write_serial_rom(chip, 0x09, 0x01);
+ retval |= pch_phub_write_serial_rom(chip, 0x08, 0x02);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x0f, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x0e, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x0d, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x0c, 0x80);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x13, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x12, 0x10);
+ retval |= pch_phub_write_serial_rom(chip, 0x11, 0x01);
+ retval |= pch_phub_write_serial_rom(chip, 0x10, 0x18);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x1b, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x1a, 0x10);
+ retval |= pch_phub_write_serial_rom(chip, 0x19, 0x01);
+ retval |= pch_phub_write_serial_rom(chip, 0x18, 0x19);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x23, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x22, 0x10);
+ retval |= pch_phub_write_serial_rom(chip, 0x21, 0x01);
+ retval |= pch_phub_write_serial_rom(chip, 0x20, 0x3a);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x27, 0x01);
+ retval |= pch_phub_write_serial_rom(chip, 0x26, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x25, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x24, 0x00);
+
+ return retval;
+}
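+
+/*
+ * For illustration (the field meanings are device-specific and not
+ * documented here): each group of four writes above fills one 32-bit SROM
+ * word, e.g. offsets 0x08..0x0b receive 0x02, 0x01, 0x10, 0xbc, which
+ * reads back as the little-endian word 0xbc100102.
+ */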
+
+/* pch_phub_gbe_serial_rom_conf_mp - makes SerialROM header format configuration
+ * for Gigabit Ethernet MAC address
+ */
+static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip)
+{
+ int retval;
+ u32 offset_addr;
+
+ offset_addr = 0x200;
+ retval = pch_phub_write_serial_rom(chip, 0x03 + offset_addr, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x02 + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x01 + offset_addr, 0x40);
+ retval |= pch_phub_write_serial_rom(chip, 0x00 + offset_addr, 0x02);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x07 + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x06 + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x05 + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x04 + offset_addr, 0x80);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x0b + offset_addr, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x0a + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x09 + offset_addr, 0x40);
+ retval |= pch_phub_write_serial_rom(chip, 0x08 + offset_addr, 0x18);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x13 + offset_addr, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x12 + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x11 + offset_addr, 0x40);
+ retval |= pch_phub_write_serial_rom(chip, 0x10 + offset_addr, 0x19);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x1b + offset_addr, 0xbc);
+ retval |= pch_phub_write_serial_rom(chip, 0x1a + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x19 + offset_addr, 0x40);
+ retval |= pch_phub_write_serial_rom(chip, 0x18 + offset_addr, 0x3a);
+
+ retval |= pch_phub_write_serial_rom(chip, 0x1f + offset_addr, 0x01);
+ retval |= pch_phub_write_serial_rom(chip, 0x1e + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x1d + offset_addr, 0x00);
+ retval |= pch_phub_write_serial_rom(chip, 0x1c + offset_addr, 0x00);
+
+ return retval;
+}
+
+/**
+ * pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
+ * @chip: pointer to the PHUB register structure
+ * @data: Buffer of the Gigabit Ethernet MAC address value.
+ */
+static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ pch_phub_read_serial_rom_val(chip, i, &data[i]);
+}
+
+/**
+ * pch_phub_write_gbe_mac_addr() - Write MAC address
+ * @chip: pointer to the PHUB register structure
+ * @data: Gigabit Ethernet MAC address value.
+ */
+static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
+{
+ int retval;
+ int i;
+
+ if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
+ retval = pch_phub_gbe_serial_rom_conf(chip);
+ else /* ML7223 */
+ retval = pch_phub_gbe_serial_rom_conf_mp(chip);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ retval = pch_phub_write_serial_rom_val(chip, i, data[i]);
+ if (retval)
+ return retval;
+ }
+
+ return retval;
+}
+
+static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ unsigned int rom_signature;
+ unsigned char rom_length;
+ unsigned int tmp;
+ unsigned int addr_offset;
+ unsigned int orom_size;
+ int ret;
+ int err;
+ ssize_t rom_size;
+
+ struct pch_phub_reg *chip =
+ dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+ ret = mutex_lock_interruptible(&pch_phub_mutex);
+ if (ret) {
+ err = -ERESTARTSYS;
+ goto return_err_nomutex;
+ }
+
+ /* Get Rom signature */
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+	if (!chip->pch_phub_extrom_base_address) {
+		err = -ENOMEM;
+		goto exrom_map_err;
+	}
+
+ pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
+ (unsigned char *)&rom_signature);
+ rom_signature &= 0xff;
+ pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address + 1,
+ (unsigned char *)&tmp);
+ rom_signature |= (tmp & 0xff) << 8;
+ if (rom_signature == 0xAA55) {
+ pch_phub_read_serial_rom(chip,
+ chip->pch_opt_rom_start_address + 2,
+ &rom_length);
+ orom_size = rom_length * 512;
+ if (orom_size < off) {
+ addr_offset = 0;
+ goto return_ok;
+ }
+ if (orom_size < count) {
+ addr_offset = 0;
+ goto return_ok;
+ }
+
+ for (addr_offset = 0; addr_offset < count; addr_offset++) {
+ pch_phub_read_serial_rom(chip,
+ chip->pch_opt_rom_start_address + addr_offset + off,
+ &buf[addr_offset]);
+ }
+ } else {
+ err = -ENODATA;
+ goto return_err;
+ }
+return_ok:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+ mutex_unlock(&pch_phub_mutex);
+ return addr_offset;
+
+return_err:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+exrom_map_err:
+ mutex_unlock(&pch_phub_mutex);
+return_err_nomutex:
+ return err;
+}
+
+static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ int err;
+ unsigned int addr_offset;
+ int ret;
+ ssize_t rom_size;
+ struct pch_phub_reg *chip =
+ dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+ ret = mutex_lock_interruptible(&pch_phub_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (off > PCH_PHUB_OROM_SIZE) {
+ addr_offset = 0;
+ goto return_ok;
+ }
+ if (count > PCH_PHUB_OROM_SIZE) {
+ addr_offset = 0;
+ goto return_ok;
+ }
+
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address) {
+ err = -ENOMEM;
+ goto exrom_map_err;
+ }
+
+ for (addr_offset = 0; addr_offset < count; addr_offset++) {
+ if (PCH_PHUB_OROM_SIZE < off + addr_offset)
+ goto return_ok;
+
+ ret = pch_phub_write_serial_rom(chip,
+ chip->pch_opt_rom_start_address + addr_offset + off,
+ buf[addr_offset]);
+ if (ret) {
+ err = ret;
+ goto return_err;
+ }
+ }
+
+return_ok:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+ mutex_unlock(&pch_phub_mutex);
+ return addr_offset;
+
+return_err:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+exrom_map_err:
+ mutex_unlock(&pch_phub_mutex);
+ return err;
+}
+
+static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u8 mac[8];
+ struct pch_phub_reg *chip = dev_get_drvdata(dev);
+ ssize_t rom_size;
+
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ return -ENOMEM;
+
+ pch_phub_read_gbe_mac_addr(chip, mac);
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+ return sprintf(buf, "%pM\n", mac);
+}
+
+static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 mac[6];
+ ssize_t rom_size;
+ struct pch_phub_reg *chip = dev_get_drvdata(dev);
+
+ if (count != 18)
+ return -EINVAL;
+
+	/* %hhx stores each value as a byte, so the u8 array is not overrun */
+	if (sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+		   &mac[0], &mac[1], &mac[2],
+		   &mac[3], &mac[4], &mac[5]) != ETH_ALEN)
+		return -EINVAL;
+
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ return -ENOMEM;
+
+ pch_phub_write_gbe_mac_addr(chip, mac);
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+ return count;
+}
+
+static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac);
+
+static struct bin_attribute pch_bin_attr = {
+ .attr = {
+ .name = "pch_firmware",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = PCH_PHUB_OROM_SIZE + 1,
+ .read = pch_phub_bin_read,
+ .write = pch_phub_bin_write,
+};
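+
+/*
+ * Userspace sketch (the PCI device path is an assumption; it depends on
+ * where the PHUB function enumerates):
+ *   cat /sys/bus/pci/devices/0000:02:00.2/pch_mac
+ *   echo 00:21:97:77:50:0c > /sys/bus/pci/devices/0000:02:00.2/pch_mac
+ * The count != 18 check in store_pch_mac() matches this xx:xx:xx:xx:xx:xx
+ * format plus the newline appended by echo.
+ */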
+
+static int __devinit pch_phub_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int retval;
+
+ int ret;
+ struct pch_phub_reg *chip;
+
+ chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s : pci_enable_device FAILED(ret=%d)", __func__, ret);
+ goto err_pci_enable_dev;
+ }
+ dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__,
+ ret);
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s : pci_request_regions FAILED(ret=%d)", __func__, ret);
+ goto err_req_regions;
+ }
+ dev_dbg(&pdev->dev, "%s : "
+ "pci_request_regions returns %d\n", __func__, ret);
+
+	chip->pch_phub_base_address = pci_iomap(pdev, 1, 0);
+
+	if (chip->pch_phub_base_address == NULL) {
+ dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
+ ret = -ENOMEM;
+ goto err_pci_iomap;
+ }
+ dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value "
+ "in pch_phub_base_address variable is %p\n", __func__,
+ chip->pch_phub_base_address);
+
+ chip->pdev = pdev; /* Save pci device struct */
+
+ if (id->driver_data == 1) { /* EG20T PCH */
+ const char *board_name;
+
+ retval = sysfs_create_file(&pdev->dev.kobj,
+ &dev_attr_pch_mac.attr);
+ if (retval)
+ goto err_sysfs_create;
+
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ if (retval)
+ goto exit_bin_attr;
+
+ pch_phub_read_modify_write_reg(chip,
+ (unsigned int)CLKCFG_REG_OFFSET,
+ CLKCFG_CAN_50MHZ,
+ CLKCFG_CANCLK_MASK);
+
+ /* quirk for CM-iTC board */
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board_name && strstr(board_name, "CM-iTC"))
+ pch_phub_read_modify_write_reg(chip,
+ (unsigned int)CLKCFG_REG_OFFSET,
+ CLKCFG_UART_48MHZ | CLKCFG_BAUDDIV |
+ CLKCFG_PLL2VCO | CLKCFG_UARTCLKSEL,
+ CLKCFG_UART_MASK);
+
+		/* set the prefetch value */
+ iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+ /* set the interrupt delay value */
+ iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+ chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+ chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
+ } else if (id->driver_data == 2) { /* ML7213 IOH */
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ if (retval)
+ goto err_sysfs_create;
+		/* set the prefetch value
+ * Device2(USB OHCI #1/ USB EHCI #1/ USB Device):a
+ * Device4(SDIO #0,1,2):f
+ * Device6(SATA 2):f
+ * Device8(USB OHCI #0/ USB EHCI #0):a
+ */
+ iowrite32(0x000affa0, chip->pch_phub_base_address + 0x14);
+ chip->pch_opt_rom_start_address =\
+ PCH_PHUB_ROM_START_ADDR_ML7213;
+ } else if (id->driver_data == 3) { /* ML7223 IOH Bus-m*/
+		/* set the prefetch value
+ * Device8(GbE)
+ */
+ iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14);
+ /* set the interrupt delay value */
+ iowrite32(0x25, chip->pch_phub_base_address + 0x140);
+ chip->pch_opt_rom_start_address =\
+ PCH_PHUB_ROM_START_ADDR_ML7223;
+ chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+ } else if (id->driver_data == 4) { /* ML7223 IOH Bus-n*/
+ retval = sysfs_create_file(&pdev->dev.kobj,
+ &dev_attr_pch_mac.attr);
+ if (retval)
+ goto err_sysfs_create;
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ if (retval)
+ goto exit_bin_attr;
+		/* set the prefetch value
+ * Device2(USB OHCI #0,1,2,3/ USB EHCI #0):a
+ * Device4(SDIO #0,1):f
+ * Device6(SATA 2):f
+ */
+ iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14);
+ chip->pch_opt_rom_start_address =\
+ PCH_PHUB_ROM_START_ADDR_ML7223;
+ chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+ } else if (id->driver_data == 5) { /* ML7831 */
+ retval = sysfs_create_file(&pdev->dev.kobj,
+ &dev_attr_pch_mac.attr);
+ if (retval)
+ goto err_sysfs_create;
+
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ if (retval)
+ goto exit_bin_attr;
+
+		/* set the prefetch value */
+ iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+ /* set the interrupt delay value */
+ iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+ chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+ chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
+ }
+
+ chip->ioh_type = id->driver_data;
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+exit_bin_attr:
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
+
+err_sysfs_create:
+ pci_iounmap(pdev, chip->pch_phub_base_address);
+err_pci_iomap:
+ pci_release_regions(pdev);
+err_req_regions:
+ pci_disable_device(pdev);
+err_pci_enable_dev:
+ kfree(chip);
+ dev_err(&pdev->dev, "%s returns %d\n", __func__, ret);
+ return ret;
+}
+
+static void __devexit pch_phub_remove(struct pci_dev *pdev)
+{
+ struct pch_phub_reg *chip = pci_get_drvdata(pdev);
+
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ pci_iounmap(pdev, chip->pch_phub_base_address);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(chip);
+}
+
+#ifdef CONFIG_PM
+
+static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int ret;
+
+ pch_phub_save_reg_conf(pdev);
+ ret = pci_save_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ " %s -pci_save_state returns %d\n", __func__, ret);
+ return ret;
+ }
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int pch_phub_resume(struct pci_dev *pdev)
+{
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
+ return ret;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pch_phub_restore_reg_conf(pdev);
+
+ return 0;
+}
+#else
+#define pch_phub_suspend NULL
+#define pch_phub_resume NULL
+#endif /* CONFIG_PM */
+
+static struct pci_device_id pch_phub_pcidev_id[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
+
+static struct pci_driver pch_phub_driver = {
+ .name = "pch_phub",
+ .id_table = pch_phub_pcidev_id,
+ .probe = pch_phub_probe,
+ .remove = __devexit_p(pch_phub_remove),
+ .suspend = pch_phub_suspend,
+ .resume = pch_phub_resume
+};
+
+static int __init pch_phub_pci_init(void)
+{
+ return pci_register_driver(&pch_phub_driver);
+}
+
+static void __exit pch_phub_pci_exit(void)
+{
+ pci_unregister_driver(&pch_phub_driver);
+}
+
+module_init(pch_phub_pci_init);
+module_exit(pch_phub_pci_exit);
+
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223/ML7831) PHUB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
new file mode 100644
index 00000000..21b28fc6
--- /dev/null
+++ b/drivers/misc/phantom.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * You need a userspace library to cooperate with this driver. It (and other
+ * info) may be obtained here:
+ * http://www.fi.muni.cz/~xslaby/phantom.html
+ * or alternatively, you might use OpenHaptics provided by Sensable.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/phantom.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include <linux/atomic.h>
+#include <asm/io.h>
+
+#define PHANTOM_VERSION "n0.9.8"
+
+#define PHANTOM_MAX_MINORS 8
+
+#define PHN_IRQCTL 0x4c /* irq control in caddr space */
+
+#define PHB_RUNNING 1
+#define PHB_NOT_OH 2
+
+static DEFINE_MUTEX(phantom_mutex);
+static struct class *phantom_class;
+static int phantom_major;
+
+struct phantom_device {
+ unsigned int opened;
+ void __iomem *caddr;
+ u32 __iomem *iaddr;
+ u32 __iomem *oaddr;
+ unsigned long status;
+ atomic_t counter;
+
+ wait_queue_head_t wait;
+ struct cdev cdev;
+
+ struct mutex open_lock;
+ spinlock_t regs_lock;
+
+ /* used in NOT_OH mode */
+ struct phm_regs oregs;
+ u32 ctl_reg;
+};
+
+static unsigned char phantom_devices[PHANTOM_MAX_MINORS];
+
+static int phantom_status(struct phantom_device *dev, unsigned long newstat)
+{
+ pr_debug("phantom_status %lx %lx\n", dev->status, newstat);
+
+ if (!(dev->status & PHB_RUNNING) && (newstat & PHB_RUNNING)) {
+ atomic_set(&dev->counter, 0);
+ iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL);
+ iowrite32(0x43, dev->caddr + PHN_IRQCTL);
+ ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
+ } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) {
+ iowrite32(0, dev->caddr + PHN_IRQCTL);
+ ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
+ }
+
+ dev->status = newstat;
+
+ return 0;
+}
+
+/*
+ * File ops
+ */
+
+static long phantom_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct phantom_device *dev = file->private_data;
+ struct phm_regs rs;
+ struct phm_reg r;
+ void __user *argp = (void __user *)arg;
+ unsigned long flags;
+ unsigned int i;
+
+ switch (cmd) {
+ case PHN_SETREG:
+ case PHN_SET_REG:
+ if (copy_from_user(&r, argp, sizeof(r)))
+ return -EFAULT;
+
+ if (r.reg > 7)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->regs_lock, flags);
+ if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) &&
+ phantom_status(dev, dev->status | PHB_RUNNING)){
+ spin_unlock_irqrestore(&dev->regs_lock, flags);
+ return -ENODEV;
+ }
+
+ pr_debug("phantom: writing %x to %u\n", r.value, r.reg);
+
+ /* preserve amp bit (don't allow to change it when in NOT_OH) */
+ if (r.reg == PHN_CONTROL && (dev->status & PHB_NOT_OH)) {
+ r.value &= ~PHN_CTL_AMP;
+ r.value |= dev->ctl_reg & PHN_CTL_AMP;
+ dev->ctl_reg = r.value;
+ }
+
+ iowrite32(r.value, dev->iaddr + r.reg);
+ ioread32(dev->iaddr); /* PCI posting */
+
+ if (r.reg == PHN_CONTROL && !(r.value & PHN_CTL_IRQ))
+ phantom_status(dev, dev->status & ~PHB_RUNNING);
+ spin_unlock_irqrestore(&dev->regs_lock, flags);
+ break;
+ case PHN_SETREGS:
+ case PHN_SET_REGS:
+ if (copy_from_user(&rs, argp, sizeof(rs)))
+ return -EFAULT;
+
+ pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask);
+ spin_lock_irqsave(&dev->regs_lock, flags);
+ if (dev->status & PHB_NOT_OH)
+ memcpy(&dev->oregs, &rs, sizeof(rs));
+ else {
+ u32 m = min(rs.count, 8U);
+ for (i = 0; i < m; i++)
+ if (rs.mask & BIT(i))
+ iowrite32(rs.values[i], dev->oaddr + i);
+ ioread32(dev->iaddr); /* PCI posting */
+ }
+ spin_unlock_irqrestore(&dev->regs_lock, flags);
+ break;
+ case PHN_GETREG:
+ case PHN_GET_REG:
+ if (copy_from_user(&r, argp, sizeof(r)))
+ return -EFAULT;
+
+ if (r.reg > 7)
+ return -EINVAL;
+
+ r.value = ioread32(dev->iaddr + r.reg);
+
+ if (copy_to_user(argp, &r, sizeof(r)))
+ return -EFAULT;
+ break;
+ case PHN_GETREGS:
+ case PHN_GET_REGS: {
+ u32 m;
+
+ if (copy_from_user(&rs, argp, sizeof(rs)))
+ return -EFAULT;
+
+ m = min(rs.count, 8U);
+
+ pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask);
+ spin_lock_irqsave(&dev->regs_lock, flags);
+ for (i = 0; i < m; i++)
+ if (rs.mask & BIT(i))
+ rs.values[i] = ioread32(dev->iaddr + i);
+ atomic_set(&dev->counter, 0);
+ spin_unlock_irqrestore(&dev->regs_lock, flags);
+
+ if (copy_to_user(argp, &rs, sizeof(rs)))
+ return -EFAULT;
+ break;
+ } case PHN_NOT_OH:
+ spin_lock_irqsave(&dev->regs_lock, flags);
+ if (dev->status & PHB_RUNNING) {
+ printk(KERN_ERR "phantom: you need to set NOT_OH "
+ "before you start the device!\n");
+ spin_unlock_irqrestore(&dev->regs_lock, flags);
+ return -EINVAL;
+ }
+ dev->status |= PHB_NOT_OH;
+ spin_unlock_irqrestore(&dev->regs_lock, flags);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long phantom_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+ cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+ cmd |= sizeof(void *) << _IOC_SIZESHIFT;
+ }
+ return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define phantom_compat_ioctl NULL
+#endif
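+
+/*
+ * Example of the rewrite above: the pointer-based ioctls encode the size
+ * of a user pointer in the command word, so a 32-bit caller arrives with
+ * _IOC_SIZE(cmd) == 4 on a 64-bit kernel. Patching the size field to
+ * sizeof(void *) makes the command equal to the native PHN_*_REG
+ * definition before phantom_ioctl() dispatches on it.
+ */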
+
+static int phantom_open(struct inode *inode, struct file *file)
+{
+ struct phantom_device *dev = container_of(inode->i_cdev,
+ struct phantom_device, cdev);
+
+ mutex_lock(&phantom_mutex);
+ nonseekable_open(inode, file);
+
+ if (mutex_lock_interruptible(&dev->open_lock)) {
+ mutex_unlock(&phantom_mutex);
+ return -ERESTARTSYS;
+ }
+
+ if (dev->opened) {
+ mutex_unlock(&dev->open_lock);
+ mutex_unlock(&phantom_mutex);
+ return -EINVAL;
+ }
+
+ WARN_ON(dev->status & PHB_NOT_OH);
+
+ file->private_data = dev;
+
+ atomic_set(&dev->counter, 0);
+ dev->opened++;
+ mutex_unlock(&dev->open_lock);
+ mutex_unlock(&phantom_mutex);
+ return 0;
+}
+
+static int phantom_release(struct inode *inode, struct file *file)
+{
+ struct phantom_device *dev = file->private_data;
+
+ mutex_lock(&dev->open_lock);
+
+ dev->opened = 0;
+ phantom_status(dev, dev->status & ~PHB_RUNNING);
+ dev->status &= ~PHB_NOT_OH;
+
+ mutex_unlock(&dev->open_lock);
+
+ return 0;
+}
+
+static unsigned int phantom_poll(struct file *file, poll_table *wait)
+{
+ struct phantom_device *dev = file->private_data;
+ unsigned int mask = 0;
+
+ pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
+ poll_wait(file, &dev->wait, wait);
+
+ if (!(dev->status & PHB_RUNNING))
+ mask = POLLERR;
+ else if (atomic_read(&dev->counter))
+ mask = POLLIN | POLLRDNORM;
+
+ pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter));
+
+ return mask;
+}
+
+static const struct file_operations phantom_file_ops = {
+ .open = phantom_open,
+ .release = phantom_release,
+ .unlocked_ioctl = phantom_ioctl,
+ .compat_ioctl = phantom_compat_ioctl,
+ .poll = phantom_poll,
+ .llseek = no_llseek,
+};
+
+static irqreturn_t phantom_isr(int irq, void *data)
+{
+ struct phantom_device *dev = data;
+ unsigned int i;
+ u32 ctl;
+
+ spin_lock(&dev->regs_lock);
+ ctl = ioread32(dev->iaddr + PHN_CONTROL);
+ if (!(ctl & PHN_CTL_IRQ)) {
+ spin_unlock(&dev->regs_lock);
+ return IRQ_NONE;
+ }
+
+ iowrite32(0, dev->iaddr);
+ iowrite32(0xc0, dev->iaddr);
+
+ if (dev->status & PHB_NOT_OH) {
+ struct phm_regs *r = &dev->oregs;
+ u32 m = min(r->count, 8U);
+
+ for (i = 0; i < m; i++)
+ if (r->mask & BIT(i))
+ iowrite32(r->values[i], dev->oaddr + i);
+
+ dev->ctl_reg ^= PHN_CTL_AMP;
+ iowrite32(dev->ctl_reg, dev->iaddr + PHN_CONTROL);
+ }
+ spin_unlock(&dev->regs_lock);
+
+ ioread32(dev->iaddr); /* PCI posting */
+
+ atomic_inc(&dev->counter);
+ wake_up_interruptible(&dev->wait);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Init and deinit driver
+ */
+
+static unsigned int __devinit phantom_get_free(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < PHANTOM_MAX_MINORS; i++)
+ if (phantom_devices[i] == 0)
+ break;
+
+ return i;
+}
+
+static int __devinit phantom_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct phantom_device *pht;
+ unsigned int minor;
+ int retval;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed!\n");
+ goto err;
+ }
+
+ minor = phantom_get_free();
+ if (minor == PHANTOM_MAX_MINORS) {
+ dev_err(&pdev->dev, "too many devices found!\n");
+ retval = -EIO;
+ goto err_dis;
+ }
+
+ phantom_devices[minor] = 1;
+
+ retval = pci_request_regions(pdev, "phantom");
+ if (retval) {
+ dev_err(&pdev->dev, "pci_request_regions failed!\n");
+ goto err_null;
+ }
+
+ retval = -ENOMEM;
+ pht = kzalloc(sizeof(*pht), GFP_KERNEL);
+ if (pht == NULL) {
+ dev_err(&pdev->dev, "unable to allocate device\n");
+ goto err_reg;
+ }
+
+ pht->caddr = pci_iomap(pdev, 0, 0);
+ if (pht->caddr == NULL) {
+ dev_err(&pdev->dev, "can't remap conf space\n");
+ goto err_fr;
+ }
+ pht->iaddr = pci_iomap(pdev, 2, 0);
+ if (pht->iaddr == NULL) {
+ dev_err(&pdev->dev, "can't remap input space\n");
+ goto err_unmc;
+ }
+ pht->oaddr = pci_iomap(pdev, 3, 0);
+ if (pht->oaddr == NULL) {
+ dev_err(&pdev->dev, "can't remap output space\n");
+ goto err_unmi;
+ }
+
+ mutex_init(&pht->open_lock);
+ spin_lock_init(&pht->regs_lock);
+ init_waitqueue_head(&pht->wait);
+ cdev_init(&pht->cdev, &phantom_file_ops);
+ pht->cdev.owner = THIS_MODULE;
+
+ iowrite32(0, pht->caddr + PHN_IRQCTL);
+ ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
+ retval = request_irq(pdev->irq, phantom_isr,
+ IRQF_SHARED | IRQF_DISABLED, "phantom", pht);
+ if (retval) {
+ dev_err(&pdev->dev, "can't establish ISR\n");
+ goto err_unmo;
+ }
+
+ retval = cdev_add(&pht->cdev, MKDEV(phantom_major, minor), 1);
+ if (retval) {
+ dev_err(&pdev->dev, "chardev registration failed\n");
+ goto err_irq;
+ }
+
+ if (IS_ERR(device_create(phantom_class, &pdev->dev,
+ MKDEV(phantom_major, minor), NULL,
+ "phantom%u", minor)))
+ dev_err(&pdev->dev, "can't create device\n");
+
+ pci_set_drvdata(pdev, pht);
+
+ return 0;
+err_irq:
+ free_irq(pdev->irq, pht);
+err_unmo:
+ pci_iounmap(pdev, pht->oaddr);
+err_unmi:
+ pci_iounmap(pdev, pht->iaddr);
+err_unmc:
+ pci_iounmap(pdev, pht->caddr);
+err_fr:
+ kfree(pht);
+err_reg:
+ pci_release_regions(pdev);
+err_null:
+ phantom_devices[minor] = 0;
+err_dis:
+ pci_disable_device(pdev);
+err:
+ return retval;
+}
+
+static void __devexit phantom_remove(struct pci_dev *pdev)
+{
+ struct phantom_device *pht = pci_get_drvdata(pdev);
+ unsigned int minor = MINOR(pht->cdev.dev);
+
+ device_destroy(phantom_class, MKDEV(phantom_major, minor));
+
+ cdev_del(&pht->cdev);
+
+ iowrite32(0, pht->caddr + PHN_IRQCTL);
+ ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
+ free_irq(pdev->irq, pht);
+
+ pci_iounmap(pdev, pht->oaddr);
+ pci_iounmap(pdev, pht->iaddr);
+ pci_iounmap(pdev, pht->caddr);
+
+ kfree(pht);
+
+ pci_release_regions(pdev);
+
+ phantom_devices[minor] = 0;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int phantom_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct phantom_device *dev = pci_get_drvdata(pdev);
+
+ iowrite32(0, dev->caddr + PHN_IRQCTL);
+ ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
+
+ synchronize_irq(pdev->irq);
+
+ return 0;
+}
+
+static int phantom_resume(struct pci_dev *pdev)
+{
+ struct phantom_device *dev = pci_get_drvdata(pdev);
+
+ iowrite32(0, dev->caddr + PHN_IRQCTL);
+
+ return 0;
+}
+#else
+#define phantom_suspend NULL
+#define phantom_resume NULL
+#endif
+
+static struct pci_device_id phantom_pci_tbl[] __devinitdata = {
+ { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050,
+ .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
+
+static struct pci_driver phantom_pci_driver = {
+ .name = "phantom",
+ .id_table = phantom_pci_tbl,
+ .probe = phantom_probe,
+ .remove = __devexit_p(phantom_remove),
+ .suspend = phantom_suspend,
+ .resume = phantom_resume
+};
+
+static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);
+
+static int __init phantom_init(void)
+{
+ int retval;
+ dev_t dev;
+
+ phantom_class = class_create(THIS_MODULE, "phantom");
+ if (IS_ERR(phantom_class)) {
+ retval = PTR_ERR(phantom_class);
+ printk(KERN_ERR "phantom: can't register phantom class\n");
+ goto err;
+ }
+ retval = class_create_file(phantom_class, &class_attr_version.attr);
+ if (retval) {
+ printk(KERN_ERR "phantom: can't create sysfs version file\n");
+ goto err_class;
+ }
+
+ retval = alloc_chrdev_region(&dev, 0, PHANTOM_MAX_MINORS, "phantom");
+ if (retval) {
+ printk(KERN_ERR "phantom: can't register character device\n");
+ goto err_attr;
+ }
+ phantom_major = MAJOR(dev);
+
+ retval = pci_register_driver(&phantom_pci_driver);
+ if (retval) {
+ printk(KERN_ERR "phantom: can't register pci driver\n");
+ goto err_unchr;
+ }
+
+ printk(KERN_INFO "Phantom Linux Driver, version " PHANTOM_VERSION ", "
+ "init OK\n");
+
+ return 0;
+err_unchr:
+ unregister_chrdev_region(dev, PHANTOM_MAX_MINORS);
+err_attr:
+ class_remove_file(phantom_class, &class_attr_version.attr);
+err_class:
+ class_destroy(phantom_class);
+err:
+ return retval;
+}
+
+static void __exit phantom_exit(void)
+{
+ pci_unregister_driver(&phantom_pci_driver);
+
+ unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS);
+
+ class_remove_file(phantom_class, &class_attr_version.attr);
+ class_destroy(phantom_class);
+
+ pr_debug("phantom: module successfully removed\n");
+}
+
+module_init(phantom_init);
+module_exit(phantom_exit);
+
+MODULE_AUTHOR("Jiri Slaby <jirislaby@gmail.com>");
+MODULE_DESCRIPTION("Sensable Phantom driver (PCI devices)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PHANTOM_VERSION);
diff --git a/drivers/misc/pn547.c b/drivers/misc/pn547.c
new file mode 100755
index 00000000..ff819341
--- /dev/null
+++ b/drivers/misc/pn547.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (C) 2010 Trusted Logic S.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/nfc/pn544.h>
+
+#include <mach/wmt_iomux.h>
+
+#include <mach/hardware.h>
+
+#undef pr_err
+#define pr_err printk
+
+#define DRIVER_DESC "NFC driver for PN544"
+
+#define CLIENT_ADDR 0x28 //0x2b mod 2014-7-10
+#define WMT_PN544_I2C_CHANNEL 0
+
+struct i2c_client *pn544_client;
+static int g_chip_nr = 7;
+
+#define MAX_BUFFER_SIZE 512
+
+extern int wmt_getsyspara(char *varname, unsigned char *varval, int *varlen);
+
+struct pn544_dev {
+ wait_queue_head_t read_wq;
+ struct mutex read_mutex;
+ struct i2c_client *client;
+ struct miscdevice pn544_device;
+
+
+ int ven_gpio;
+ int ven_active;
+ int ven_on_off;
+
+ int firm_gpio;
+ int firm_active;
+
+ int irq_gpio;
+ int irq_active;
+
+
+ bool irq_enabled;
+ spinlock_t irq_enabled_lock;
+};
+
+static int irq_gpio = 0;
+
+void wmt_clr_int(void)
+{
+ int num = irq_gpio;
+
+ int reg_shift = 20;
+	if (num > 42)
+		return;
+
+ if (num < 20)
+ {
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<num;
+ }
+ else
+ {
+ switch (num)
+ {
+ case WMT_PIN_GP5_VDOUT11:
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<reg_shift;
+ break;
+ case WMT_PIN_GP5_VDOUT12:
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<(reg_shift+1);
+ break;
+ case WMT_PIN_GP6_VDOUT18:
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<(reg_shift+2);
+ break;
+ case WMT_PIN_GP6_VDOUT19:
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<(reg_shift+3);
+ break;
+ case WMT_PIN_GP6_VDOUT20:
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<(reg_shift+4);
+ break;
+ case WMT_PIN_GP6_VDOUT21:
+ REG32_VAL(__GPIO_BASE+0x0360) = 1<<(reg_shift+5);
+ break;
+
+ }
+ }
+	//0x0360 is the GPIO interrupt status register, 1:active 0:inactive; write 1 to clear
+}
+
+void wmt_set_irqinput(void)
+{
+ int num = irq_gpio;
+
+	if (num > 42)
+ return;
+ if (num < 20)
+ {
+ REG32_VAL(__GPIO_BASE+0x0040) |= (1<<num); //enable gpio
+ REG32_VAL(__GPIO_BASE+0x0080) &= ~(1<<num); //set input
+ }
+
+ //GPIO GP5 Enable Register for VDOUT[15:8]
+ //GPIO GP6 Enable Register for VDOUT[23:16]
+
+	switch (num)	//gpio 0-19 vdout11 12 18:21 -->gpio 31 32 38:41
+	{
+	case WMT_PIN_GP5_VDOUT11:
+	case WMT_PIN_GP5_VDOUT12:
+	case WMT_PIN_GP6_VDOUT18:
+	case WMT_PIN_GP6_VDOUT19:
+	case WMT_PIN_GP6_VDOUT20:
+	case WMT_PIN_GP6_VDOUT21:
+		REG32_VAL(__GPIO_BASE+0x0044) |= (1<<(num-20)); //enable gpio
+		REG32_VAL(__GPIO_BASE+0x0084) &= ~(1<<(num-20)); //set input
+		break;
+	}
+}
+
+static void wmt_disable_irqinput(void)
+{
+ int num = irq_gpio;
+
+	if (num > 42)
+ return;
+ if (num < 20)
+ {
+		REG32_VAL(__GPIO_BASE+0x0040) &= ~(1<<num); //disable gpio
+		REG32_VAL(__GPIO_BASE+0x0080) &= ~(1<<num); //set input
+ }
+
+ //GPIO GP5 Enable Register for VDOUT[15:8]
+ //GPIO GP6 Enable Register for VDOUT[23:16]
+
+	switch (num)	//gpio 0-19 vdout11 12 18:21 -->gpio 31 32 38:41
+	{
+	case WMT_PIN_GP5_VDOUT11:
+	case WMT_PIN_GP5_VDOUT12:
+	case WMT_PIN_GP6_VDOUT18:
+	case WMT_PIN_GP6_VDOUT19:
+	case WMT_PIN_GP6_VDOUT20:
+	case WMT_PIN_GP6_VDOUT21:
+		REG32_VAL(__GPIO_BASE+0x0044) &= ~(1<<(num-20)); //disable gpio
+		REG32_VAL(__GPIO_BASE+0x0084) &= ~(1<<(num-20)); //set input
+		break;
+	}
+}
+
+static void wmt_pullup(void)
+{
+ int num = irq_gpio;
+
+	if (num > 42)
+ return;
+ if (num < 20)
+ {
+ REG32_VAL(__GPIO_BASE+0x04c0) |= (1<<num); //pull up!
+ REG32_VAL(__GPIO_BASE+0x0480) |= (1<<num); //enable pull up/down
+ }
+	switch (num)
+	{	//vdout11 12 18:21-->gpio 31 32 38:41
+	case WMT_PIN_GP5_VDOUT11:
+	case WMT_PIN_GP5_VDOUT12:
+	case WMT_PIN_GP6_VDOUT18:
+	case WMT_PIN_GP6_VDOUT19:
+	case WMT_PIN_GP6_VDOUT20:
+	case WMT_PIN_GP6_VDOUT21:
+		REG32_VAL(__GPIO_BASE+0x04c4) |= (1<<(num-20)); //pull up!
+		REG32_VAL(__GPIO_BASE+0x0484) |= (1<<(num-20)); //enable pull up/down
+		break;
+	}
+}
+
+int wmt_set_gpirq(int type)
+{
+ int shift;
+ int offset;
+ unsigned long reg;
+ int num = irq_gpio;
+
+	if (num > 42)
+ return -1;
+	/* release the pin (disable GPIO, keep it as input), then enable its pull-up */
+	wmt_disable_irqinput();
+	wmt_pullup();
+
+ //set gpio irq triger type
+ if(num < 4){//[0,3]
+ shift = num;
+ offset = 0x0300;
+ }else if(num >= 4 && num < 8){//[4,7]
+ shift = num-4;
+ offset = 0x0304;
+ }else if (num >= 8 && num <12){// [8,11]
+ shift = num-8;
+ offset = 0x0308;
+ }
+ else if (num>=12 && num < 16)
+ {
+ shift = num -12;
+ offset = 0x030c;
+ }
+ else if (num>=16 && num <20)
+ {
+ shift = num -16;
+ offset = 0x0310;
+ }
+ else ////vdout11 12 18:21-->gpio 31 32 38:41
+ { // 0x0314----0x0319
+ switch (num)
+ {
+ case WMT_PIN_GP5_VDOUT11:
+ shift = 0;
+ offset = 0x0314;
+ break;
+ case WMT_PIN_GP5_VDOUT12:
+ shift = 1;
+ offset = 0x0314;
+ break;
+
+ case WMT_PIN_GP6_VDOUT18:
+ shift = 2;
+ offset = 0x0314;
+ break;
+
+ case WMT_PIN_GP6_VDOUT19:
+ shift = 3;
+ offset = 0x0314;
+ break;
+ case WMT_PIN_GP6_VDOUT20:
+ shift = 0;
+ offset = 0x0318;
+ break;
+
+			case WMT_PIN_GP6_VDOUT21:
+				shift = 1;
+				offset = 0x0318;
+				break;
+			default:
+				return -1;	/* not an interrupt-capable pin; avoid using uninitialized shift/offset */
+		}
+ }
+
+ reg = REG32_VAL(__GPIO_BASE + offset);
+
+ switch(type){
+ case IRQ_TYPE_LEVEL_LOW:
+ reg &= ~(1<<(shift*8+2));
+ reg &= ~(1<<(shift*8+1));
+ reg &= ~(1<<(shift*8));
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ reg &= ~(1<<(shift*8+2));
+ reg &= ~(1<<(shift*8+1));
+ reg |= (1<<(shift*8));
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ reg &= ~(1<<(shift*8+2));
+ reg |= (1<<(shift*8+1));
+ reg &= ~(1<<(shift*8));
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ reg &= ~(1<<(shift*8+2));
+ reg |= (1<<(shift*8+1));
+ reg |= (1<<(shift*8));
+ break;
+ default://both edge
+ reg |= (1<<(shift*8+2));
+ reg &= ~(1<<(shift*8+1));
+ reg &= ~(1<<(shift*8));
+ break;
+
+ }
+ //reg |= 1<<(shift*8+7);//enable interrupt
+ reg &= ~(1<<(shift*8+7)); //disable int
+
+ REG32_VAL(__GPIO_BASE + offset) = reg;
+ wmt_clr_int(); //replace 2014-8-22
+ //REG32_VAL(__GPIO_BASE+0x0360) = 1<<num; //clear interrupt status// 1 bit per int
+ msleep(5);
+ return 0;
+}
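+/*
+ * Editor's note (inferred from wmt_set_gpirq() above, not documented in the
+ * original patch): each interrupt-capable pin owns one byte in the trigger
+ * registers starting at __GPIO_BASE + 0x0300, four pins per 32-bit
+ * register. Within that byte, bits [2:0] select the trigger type
+ * (000 level-low, 001 level-high, 010 falling, 011 rising, 1xx both edges)
+ * and bit 7 enables the interrupt. For example, GPIO 6 uses register
+ * 0x0304, byte 2: type bits at 1 << (2*8 + n), enable bit at 1 << (2*8 + 7).
+ */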
+
+int wmt_enable_gpirq(void)
+{
+ int num = irq_gpio;
+
+	if (num > 42)
+		return -1;
+
+ if(num<4)
+ REG32_VAL(__GPIO_BASE+0x0300) |= 1<<(num*8+7); //enable interrupt
+ else if(num >= 4 && num < 8)
+ REG32_VAL(__GPIO_BASE+0x0304) |= 1<<((num-4)*8+7); //enable interrupt
+ else if (num >= 8 && num < 12)
+ REG32_VAL(__GPIO_BASE+0x0308) |= 1<<((num-8)*8+7); //enable interrupt
+ else if (num >= 12 && num < 16)
+ REG32_VAL(__GPIO_BASE+0x030c) |= 1<<((num-12)*8+7); //enable interrupt
+ else if (num >= 16 && num < 20)
+ REG32_VAL(__GPIO_BASE+0x0310) |= 1<<((num-16)*8+7); //enable interrupt
+	else	/* VDOUT11/12 and VDOUT18-21 map to GPIO 31/32 and 38-41 */
+	{	/* trigger-type registers 0x0314 and 0x0318 */
+ switch (num)
+ {
+ case WMT_PIN_GP5_VDOUT11:
+ REG32_VAL(__GPIO_BASE+0x0314) |= 1<<(0*8+7); //enable interrupt
+
+ break;
+ case WMT_PIN_GP5_VDOUT12:
+ REG32_VAL(__GPIO_BASE+0x0314) |= 1<<(1*8+7); //enable interrupt
+ break;
+
+ case WMT_PIN_GP6_VDOUT18:
+ REG32_VAL(__GPIO_BASE+0x0314) |= 1<<(2*8+7); //enable interrupt
+ break;
+
+ case WMT_PIN_GP6_VDOUT19:
+ REG32_VAL(__GPIO_BASE+0x0314) |= 1<<(3*8+7); //enable interrupt
+ break;
+ case WMT_PIN_GP6_VDOUT20:
+ REG32_VAL(__GPIO_BASE+0x0318) |= 1<<(0*8+7); //enable interrupt
+
+ break;
+
+ case WMT_PIN_GP6_VDOUT21:
+
+ REG32_VAL(__GPIO_BASE+0x0318) |= 1<<(1*8+7); //enable interrupt
+ break;
+ }
+
+ }
+
+ return 0;
+}
+
+int wmt_disable_gpirq(void)
+{
+ int num = irq_gpio;
+
+	if (num > 42)
+		return -1;
+
+ if(num<4)
+ REG32_VAL(__GPIO_BASE+0x0300) &= ~(1<<(num*8+7)); //disable interrupt
+ else if(num >= 4 && num < 8)
+		REG32_VAL(__GPIO_BASE+0x0304) &= ~(1<<((num-4)*8+7)); //disable interrupt
+	else if (num >= 8 && num <12)
+		REG32_VAL(__GPIO_BASE+0x0308) &= ~(1<<((num-8)*8+7)); //disable interrupt
+	else if (num >= 12 && num <16)
+		REG32_VAL(__GPIO_BASE+0x030c) &= ~(1<<((num-12)*8+7)); //disable interrupt
+	else if (num >= 16 && num <20)
+		REG32_VAL(__GPIO_BASE+0x0310) &= ~(1<<((num-16)*8+7)); //disable interrupt
+	else	/* VDOUT11/12 and VDOUT18-21 map to GPIO 31/32 and 38-41 */
+	{	/* trigger-type registers 0x0314 and 0x0318 */
+		switch (num)
+		{
+			case WMT_PIN_GP5_VDOUT11:
+				REG32_VAL(__GPIO_BASE+0x0314) &= ~(1<<(0*8+7)); //disable interrupt
+				break;
+			case WMT_PIN_GP5_VDOUT12:
+				REG32_VAL(__GPIO_BASE+0x0314) &= ~(1<<(1*8+7)); //disable interrupt
+				break;
+			case WMT_PIN_GP6_VDOUT18:
+				REG32_VAL(__GPIO_BASE+0x0314) &= ~(1<<(2*8+7)); //disable interrupt
+				break;
+			case WMT_PIN_GP6_VDOUT19:
+				REG32_VAL(__GPIO_BASE+0x0314) &= ~(1<<(3*8+7)); //disable interrupt
+				break;
+			case WMT_PIN_GP6_VDOUT20:
+				REG32_VAL(__GPIO_BASE+0x0318) &= ~(1<<(0*8+7)); //disable interrupt
+				break;
+			case WMT_PIN_GP6_VDOUT21:
+				REG32_VAL(__GPIO_BASE+0x0318) &= ~(1<<(1*8+7)); //disable interrupt
+				break;
+		}
+
+ }
+
+ return 0;
+}
+
+static int pn544_wmt_is_int(int num)
+{
+ if (num > 42)
+ {
+ return 0;
+ }
+ if (num < 20)
+ {
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<num) ? 1 : 0;
+ }
+ else
+ {
+ switch (num)
+ {
+ case WMT_PIN_GP5_VDOUT11:
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<20) ? 1 : 0;
+ break;
+ case WMT_PIN_GP5_VDOUT12:
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<21) ? 1 : 0;
+ break;
+ case WMT_PIN_GP6_VDOUT18:
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<22) ? 1 : 0;
+ break;
+ case WMT_PIN_GP6_VDOUT19:
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<23) ? 1 : 0;
+ break;
+ case WMT_PIN_GP6_VDOUT20:
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<24) ? 1 : 0;
+ break;
+ case WMT_PIN_GP6_VDOUT21:
+ return REG32_VAL(__GPIO_BASE+0x0360) & (1<<25) ? 1 : 0;
+ break;
+
+ }
+	}
+
+	return 0;	/* num in 20..42 but not a known VDOUT pin: not flagged */
+}
+//*************add end
+
+
+static void pn544_disable_irq(struct pn544_dev *pn544_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pn544_dev->irq_enabled_lock, flags);
+ if (pn544_dev->irq_enabled) {
+ wmt_disable_gpirq();
+ //disable_irq_nosync(pn544_dev->client->irq);
+ pn544_dev->irq_enabled = false;
+ }
+ spin_unlock_irqrestore(&pn544_dev->irq_enabled_lock, flags);
+}
+
+static irqreturn_t pn544_dev_irq_handler(int irq, void *dev_id)
+{
+ struct pn544_dev *pn544_dev = dev_id;
+ //printk("<<<<<<<%s!\n", __func__);
+#if 0
+ if (!gpio_get_value(pn544_dev->irq_gpio)) {
+
+ return IRQ_NONE;
+ //return IRQ_HANDLED;//???
+ }
+#endif
+ if (pn544_wmt_is_int(pn544_dev->irq_gpio))
+ {
+ wmt_clr_int();
+ pn544_disable_irq(pn544_dev);
+
+ //printk("<<<<<<<%s! wakeup reader!\n", __func__);
+ /* Wake up waiting readers */
+ wake_up(&pn544_dev->read_wq);
+ return IRQ_HANDLED;
+ }
+ else
+ {
+ return IRQ_NONE;
+ }
+}
+
+static ssize_t pn544_dev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct pn544_dev *pn544_dev = filp->private_data;
+ char tmp[MAX_BUFFER_SIZE];
+ int ret,i;
+
+ if (count > MAX_BUFFER_SIZE)
+ count = MAX_BUFFER_SIZE;
+
+ printk("%s : reading %zu bytes.\n", __func__, count);
+
+ mutex_lock(&pn544_dev->read_mutex);
+
+ printk("%s irq gpio %d val %d\n", __func__, pn544_dev->irq_gpio, gpio_get_value(pn544_dev->irq_gpio));
+	if (gpio_get_value(pn544_dev->irq_gpio) != pn544_dev->irq_active) {
+		printk("%s waiting for interrupt\n", __func__);
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto fail;
+ }
+
+ while (1) {
+ pn544_dev->irq_enabled = true;
+ wmt_enable_gpirq();
+ //enable_irq(pn544_dev->client->irq);
+
+ printk("%s &&& enter waitting for interrupt!\n", __func__);
+
+ ret = wait_event_interruptible(
+ pn544_dev->read_wq,
+				!pn544_dev->irq_enabled);
+
+ /*ret = wait_event_interruptible(pn544_dev->read_wq,
+ gpio_get_value(pn544_dev->irq_gpio));
+ */
+
+ pn544_disable_irq(pn544_dev);
+
+ if (ret)
+ goto fail;
+ if (gpio_get_value(pn544_dev->irq_gpio)==pn544_dev->irq_active)
+ break;
+
+ pr_warning("%s: spurious interrupt detected\n", __func__);
+ }
+ }
+
+ /* Read data */
+ ret = i2c_master_recv(pn544_dev->client, tmp, count);
+ mutex_unlock(&pn544_dev->read_mutex);
+ /* pn544 seems to be slow in handling I2C read requests
+ * so add 1ms delay after recv operation */
+ udelay(1000);
+
+ if (ret < 0) {
+ pr_err("%s: i2c_master_recv returned %d\n", __func__, ret);
+ return ret;
+ }
+ if (ret > count) {
+ pr_err("%s: received too many bytes from i2c (%d)\n",
+ __func__, ret);
+ return -EIO;
+ }
+ if (copy_to_user(buf, tmp, ret)) {
+ pr_warning("%s : failed to copy to user space\n", __func__);
+ return -EFAULT;
+ }
+
+ printk("IFD->PC:");
+ for(i = 0; i < ret; i++){
+ printk(" %02X", tmp[i]);
+ }
+ printk("\n");
+
+ return ret;
+
+fail:
+ printk("%s fail end! ret :%x\n", __func__, ret);
+ mutex_unlock(&pn544_dev->read_mutex);
+ return ret;
+}
+
+static ssize_t pn544_dev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct pn544_dev *pn544_dev;
+ char tmp[MAX_BUFFER_SIZE];
+ int ret,i;
+
+ pn544_dev = filp->private_data;
+
+ if (count > MAX_BUFFER_SIZE)
+ count = MAX_BUFFER_SIZE;
+
+ if (copy_from_user(tmp, buf, count)) {
+ pr_err("%s : failed to copy from user space\n", __func__);
+ return -EFAULT;
+ }
+
+ printk("%s : writing %zu bytes.\n", __func__, count);
+ /* Write data */
+ ret = i2c_master_send(pn544_dev->client, tmp, count);
+ if (ret != count) {
+ pr_err("%s : i2c_master_send returned %d\n", __func__, ret);
+ ret = -EIO;
+ }
+ printk("PC->IFD:");
+ for(i = 0; i < count; i++){
+ printk(" %02X", tmp[i]);
+ }
+ /* pn544 seems to be slow in handling I2C write requests
+	 * so add 1ms delay after I2C send operation */
+ udelay(1000);
+
+ return ret;
+}
+
+static int pn544_dev_open(struct inode *inode, struct file *filp)
+{
+ struct pn544_dev *pn544_dev = container_of(filp->private_data,
+ struct pn544_dev,
+ pn544_device);
+
+ filp->private_data = pn544_dev;
+
+ pr_debug("%s : %d,%d\n", __func__, imajor(inode), iminor(inode));
+
+ return 0;
+}
+
+static long pn544_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct pn544_dev *pn544_dev = filp->private_data;
+
+ switch (cmd) {
+ case PN544_SET_PWR:
+ if (arg == 2) {
+ /* power on with firmware download (requires hw reset)
+ */
+ printk("%s power on with firmware\n", __func__);
+			gpio_set_value(pn544_dev->ven_gpio, pn544_dev->ven_active);
+			msleep(20);
+			if (pn544_dev->firm_gpio >= 0)
+				gpio_set_value(pn544_dev->firm_gpio, pn544_dev->firm_active);
+			msleep(20);
+			gpio_set_value(pn544_dev->ven_gpio, !pn544_dev->ven_active);
+			msleep(100);
+			gpio_set_value(pn544_dev->ven_gpio, pn544_dev->ven_active);
+			msleep(20);
+			pn544_dev->ven_on_off = 1;
+		} else if (arg == 1) {
+			/* power on */
+			printk("%s power on\n", __func__);
+			if (pn544_dev->firm_gpio >= 0)
+				gpio_set_value(pn544_dev->firm_gpio, !pn544_dev->firm_active);
+			gpio_set_value(pn544_dev->ven_gpio, pn544_dev->ven_active);
+			msleep(100);
+			pn544_dev->ven_on_off = 1;
+		} else if (arg == 0) {
+			/* power off */
+			printk("%s power off ven %d\n", __func__, !pn544_dev->ven_active);
+			if (pn544_dev->firm_gpio >= 0)
+				gpio_set_value(pn544_dev->firm_gpio, !pn544_dev->firm_active);
+			gpio_set_value(pn544_dev->ven_gpio, !pn544_dev->ven_active);
+
+			pn544_dev->ven_on_off = 0;
+			msleep(100);
+		} else {
+			printk("%s bad arg %lu\n", __func__, arg);
+ return -EINVAL;
+ }
+ break;
+ default:
+ printk("%s bad ioctl %u\n", __func__, cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
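+/*
+ * Editor's sketch of the expected userspace usage (hypothetical snippet,
+ * not part of the original patch; the device node name depends on
+ * g_chip_nr, e.g. /dev/pn544 or /dev/pn547):
+ *
+ *	int fd = open("/dev/pn544", O_RDWR);
+ *	ioctl(fd, PN544_SET_PWR, 1);	// power on
+ *	ioctl(fd, PN544_SET_PWR, 2);	// power on + firmware download mode
+ *	ioctl(fd, PN544_SET_PWR, 0);	// power off
+ */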
+
+static const struct file_operations pn544_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = pn544_dev_read,
+ .write = pn544_dev_write,
+ .open = pn544_dev_open,
+ .unlocked_ioctl = pn544_dev_ioctl,
+};
+
+
+static int pn544_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct pn544_nfc_platform_data *platform_data;
+ struct pn544_dev *pn544_dev;
+
+ platform_data = client->dev.platform_data;
+
+ if (platform_data == NULL) {
+ pr_err("%s : nfc probe fail\n", __func__);
+ return -ENODEV;
+ }
+
+ printk("nfc probe step01 is ok\n");
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("%s : need I2C_FUNC_I2C\n", __func__);
+ return -ENODEV;
+ }
+
+ printk("nfc probe step02 is ok\n");
+
+ ret = gpio_request(platform_data->irq_gpio, "nfc_int");
+ if (ret)
+ return -ENODEV;
+
+
+
+ ret = gpio_request(platform_data->ven_gpio, "nfc_ven");
+ if (ret)
+ goto err_ven;
+
+
+ if (platform_data->firm_gpio >= 0)
+ {
+
+ ret = gpio_request(platform_data->firm_gpio, "nfc_firm");
+ if (ret)
+ goto err_firm;
+
+ }
+
+ pn544_dev = kzalloc(sizeof(*pn544_dev), GFP_KERNEL);
+ if (pn544_dev == NULL) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ printk("nfc probe step04 is ok\n");
+
+ //pn544_dev->irq_enable = platform_data->irq_enable;
+ pn544_dev->irq_gpio = platform_data->irq_gpio;
+ pn544_dev->irq_active = platform_data->irq_active;
+
+ //pn544_dev->ven_enable = platform_data->ven_enable;
+ pn544_dev->ven_gpio = platform_data->ven_gpio;
+ pn544_dev->ven_active = platform_data->ven_active;
+ pn544_dev->ven_on_off = 0;
+ //pn544_dev->firm_enable = platform_data->firm_enable;
+ pn544_dev->firm_gpio = platform_data->firm_gpio;
+ pn544_dev->firm_active = platform_data->firm_active;
+
+ pn544_dev->client = client;
+
+ irq_gpio = pn544_dev->irq_gpio;
+ /* init mutex and queues */
+ init_waitqueue_head(&pn544_dev->read_wq);
+ mutex_init(&pn544_dev->read_mutex);
+ spin_lock_init(&pn544_dev->irq_enabled_lock);
+
+ pn544_dev->pn544_device.minor = MISC_DYNAMIC_MINOR;
+ switch (g_chip_nr)
+ {
+ case 7:
+ pn544_dev->pn544_device.name = "pn547"; //"pn544" modify 2014-7-21
+ break;
+ case 4:
+ pn544_dev->pn544_device.name = "pn544"; //"pn544" modify 2014-8-22
+ break;
+ }
+ pn544_dev->pn544_device.fops = &pn544_dev_fops;
+
+ ret = misc_register(&pn544_dev->pn544_device);
+ if (ret) {
+ pr_err("%s : misc_register failed\n", __FILE__);
+ goto err_misc_register;
+ }
+ printk("nfc probe step05 is ok\n");
+
+ /* request irq. the irq is set whenever the chip has data available
+ * for reading. it is cleared when all data has been read.
+ */
+
+
+ gpio_direction_output(platform_data->ven_gpio,/*0*/ !platform_data->ven_active);
+
+ if (platform_data->firm_gpio >= 0)
+ gpio_direction_output(platform_data->firm_gpio,/*0*/!platform_data->firm_active);
+ //s3c_gpio_setpull(platform_data->ven_gpio, S3C_GPIO_PULL_UP);
+ //s3c_gpio_setpull(platform_data->firm_gpio, S3C_GPIO_PULL_DOWN);
+
+ //eint16 setting
+
+ gpio_direction_input(platform_data->irq_gpio);
+ //wmt_gpio_setpull(platform_data->irq_gpio, WMT_GPIO_PULL_UP);
+ if (platform_data->irq_active)
+ {
+ printk("%s irq pull down!\n", __func__);
+ wmt_gpio_setpull(platform_data->irq_gpio, WMT_GPIO_PULL_DOWN); //modify 2014-7-16
+ }
+ else
+ {
+ printk("%s irq pull up!\n", __func__);
+ wmt_gpio_setpull(platform_data->irq_gpio, WMT_GPIO_PULL_UP);
+ }
+ //s3c_gpio_setpull(platform_data->irq_gpio, S3C_GPIO_PULL_UP);
+
+
+ pr_info("%s : requesting IRQ %d\n", __func__, client->irq);
+ pn544_dev->irq_enabled = true;
+
+	ret = request_irq(client->irq, pn544_dev_irq_handler, IRQF_SHARED,
+			client->name, pn544_dev);
+ if (ret) {
+ printk(/*&client->dev, */"request_irq failed\n");
+ goto err_request_irq_failed;
+ }
+ printk("nfc probe step06 is ok\n");
+ //add 2014-7-16 for gpio irq config
+ wmt_set_irqinput();
+ wmt_set_gpirq(platform_data->irq_active?IRQF_TRIGGER_HIGH:IRQF_TRIGGER_LOW); //IRQF_TRIGGER_HIGH
+ //wmt_enable_gpirq();
+ //add end
+
+ pn544_disable_irq(pn544_dev);
+ i2c_set_clientdata(client, pn544_dev);
+
+ //add debug 2014-7-10
+#if 0
+ printk("%s power on\n", __func__);
+ gpio_set_value(pn544_dev->firm_gpio, 0);
+ gpio_set_value(pn544_dev->ven_gpio, 1);
+ msleep(10);
+
+ int maddr = 1;
+ char mbuf[2] = {0};
+ for (maddr=1; maddr<0x7f; maddr++)
+ {
+ mbuf[0] = maddr;
+ /* Write data */
+ //pn544_dev->client->addr = maddr;
+ ret = i2c_master_send(pn544_dev->client, mbuf, 1);
+ if (ret != 1) {
+ pr_err("%s : reg 0x%x i2c_master_send returned %d\n", __func__, maddr, ret);
+ //ret = -EIO;
+ }
+ else
+ {
+ pr_err("%s ok!!!: reg 0x%x i2c_master_send returned %d\n", __func__, maddr, ret);
+ //break;
+ }
+
+ struct i2c_msg msg[2] = {
+ {.addr = client->addr,
+ .flags = 0|I2C_M_NOSTART,
+ .len = 1,
+ .buf = &mbuf[0],
+ },
+ { .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &mbuf[1],
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+
+ //ret = i2c_master_recv(pn544_dev->client, mbuf, 1);
+ if (ret != 2) {
+ pr_err("%s : addr 0x%x i2c_master_recv %d returned %d\n", __func__, maddr, mbuf[1], ret);
+ //ret = -EIO;
+ }
+ else
+ {
+ pr_err("%s ok!!!: addr 0x%x i2c_master_recv %d returned %d\n", __func__, maddr, mbuf[1], ret);
+ //break;
+ }
+
+ }
+#endif
+ //add end
+
+ printk("nfc probe step07 is ok\n");
+
+ return 0;
+
+err_request_irq_failed:
+ misc_deregister(&pn544_dev->pn544_device);
+err_misc_register:
+ mutex_destroy(&pn544_dev->read_mutex);
+ kfree(pn544_dev);
+err_exit:
+ if (platform_data->firm_gpio >= 0)
+ gpio_free(platform_data->firm_gpio);
+err_firm:
+
+ gpio_free(platform_data->ven_gpio);
+err_ven:
+
+ gpio_free(platform_data->irq_gpio);
+ return ret;
+}
+
+static int pn544_remove(struct i2c_client *client)
+{
+ struct pn544_dev *pn544_dev;
+
+ pn544_dev = i2c_get_clientdata(client);
+ free_irq(client->irq, pn544_dev);
+ misc_deregister(&pn544_dev->pn544_device);
+ mutex_destroy(&pn544_dev->read_mutex);
+ gpio_free(pn544_dev->irq_gpio);
+ gpio_free(pn544_dev->ven_gpio);
+	if (pn544_dev->firm_gpio >= 0)	/* probe only requested firm_gpio when >= 0 */
+		gpio_free(pn544_dev->firm_gpio);
+ kfree(pn544_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id pn544_id[] = {
+ { "pn544", 0 },
+ { }
+};
+
+static int pn544_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct pn544_dev *pn544_dev;
+
+ pn544_dev = i2c_get_clientdata(client);
+ printk("\n%s on_off %d\n", __func__, pn544_dev->ven_on_off);
+ return 0;
+}
+
+static int pn544_resume(struct i2c_client *client)
+{
+ struct pn544_dev *pn544_dev;
+
+ pn544_dev = i2c_get_clientdata(client);
+
+ printk("%s on_off %d\n", __func__, pn544_dev->ven_on_off);
+
+ gpio_direction_input(pn544_dev->irq_gpio);
+ //wmt_gpio_setpull(platform_data->irq_gpio, WMT_GPIO_PULL_UP);
+ if (pn544_dev->irq_active)
+ wmt_gpio_setpull(pn544_dev->irq_gpio, WMT_GPIO_PULL_DOWN); //modify 2014-7-16
+ else
+ wmt_gpio_setpull(pn544_dev->irq_gpio, WMT_GPIO_PULL_UP);
+
+ //add 2014-7-16 for gpio irq config
+ wmt_set_irqinput();
+ wmt_set_gpirq(pn544_dev->irq_active?IRQF_TRIGGER_HIGH:IRQF_TRIGGER_LOW); //IRQF_TRIGGER_HIGH
+ //wmt_enable_gpirq();
+ //add end
+
+ pn544_disable_irq(pn544_dev);
+
+ printk("%s active %d, !active %d\n", __func__, pn544_dev->ven_active, !pn544_dev->ven_active);
+ if (pn544_dev->ven_on_off)
+ gpio_set_value(pn544_dev->ven_gpio, pn544_dev->ven_active);
+ else
+ gpio_set_value(pn544_dev->ven_gpio, !pn544_dev->ven_active);
+
+ //gpio_set_value(pn544_dev->ven_gpio, 0);
+
+ return 0;
+}
+static struct i2c_driver pn544_driver = {
+ .id_table = pn544_id,
+ .probe = pn544_probe,
+ .remove = pn544_remove,
+ .suspend = pn544_suspend,
+ .resume = pn544_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pn544",
+ },
+};
+
+/*
+ * module load/unload record keeping
+ */
+
+static struct pn544_nfc_platform_data pn544_pdata = {
+ //.irq_enable = 1,
+ .irq_gpio = WMT_PIN_GP0_GPIO0,
+ .irq_active = 1,
+ //.ven_enable = 1,
+ .ven_gpio= WMT_PIN_GP18_UART0RTS, // WMT_PIN_GP18_UART0RTS WMT_PIN_GP0_GPIO1
+ .ven_active = 1,
+ //.firm_enable = 1,
+ .firm_gpio=WMT_PIN_GP0_GPIO1, //WMT_PIN_GP0_GPIO2,
+ .firm_active = 1,
+};
+static int g_i2c_adapter = 0;
+static int g_i2c_addr = 0x28;
+static int get_board_info(void)
+{
+ int ret;
+ char buf[100] = {0};
+ int len = sizeof(buf);
+
+ char *pbuf = "wmt.nfc.pn54x"; // wmt.nfc.pn547
+ printk("%s\n", __func__);
+	/* e.g. wmt.nfc.pn54x 4:43:0:39:1:1:1:132:1 (i2c addr 0x2b == 43 decimal) */
+ ret = wmt_getsyspara(pbuf, buf, &len);
+ if (!ret)
+ {
+ printk("%s %s:%s\n", __func__, pbuf, buf);//irq ven firm
+
+ ret = sscanf(buf, "%d:%d:%d:%d:%d:%d:%d:%d:%d", \
+ &g_chip_nr, &g_i2c_addr, &g_i2c_adapter, \
+ &pn544_pdata.irq_gpio, &pn544_pdata.irq_active, \
+ &pn544_pdata.ven_gpio, &pn544_pdata.ven_active, \
+ &pn544_pdata.firm_gpio, &pn544_pdata.firm_active);
+
+ printk("chip nr %d,i2c addr %d adapter %d, irq gpio %d active %d, ven %d %d, firm %d %d\n", \
+ g_chip_nr, g_i2c_addr, g_i2c_adapter, \
+ pn544_pdata.irq_gpio, pn544_pdata.irq_active, \
+ pn544_pdata.ven_gpio, pn544_pdata.ven_active, \
+ pn544_pdata.firm_gpio, pn544_pdata.firm_active);
+
+ return 0;
+ }
+ else
+ {
+ printk("%s not get %s, use default\n", __func__, pbuf);
+ return -1;
+ }
+
+ return 0;
+}
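+/*
+ * Editor's note: the nine colon-separated fields of the "wmt.nfc.pn54x"
+ * U-Boot parameter decoded above are, in sscanf() order:
+ * chip_nr : i2c_addr : i2c_adapter : irq_gpio : irq_active :
+ * ven_gpio : ven_active : firm_gpio : firm_active.
+ * So "4:43:0:39:1:1:1:132:1" selects a PN544 (chip 4) at I2C address 43
+ * (decimal) on adapter 0, with the IRQ on GPIO 39 active-high, VEN on
+ * GPIO 1 active-high, and FIRM on GPIO 132 active-high.
+ */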
+
+static int __init pn544_dev_init(void)
+{
+ //pr_info("Loading pn544 driver\n");
+
+ int r;
+ struct i2c_adapter *adapter;
+
+
+
+ struct i2c_board_info wmt_pn544_bi = {
+ .type = PN544_DRIVER_NAME,
+ .flags = 0x00,
+ .addr = CLIENT_ADDR,
+ .platform_data = &pn544_pdata, //custom 2014-7-25
+ .archdata = NULL,
+ .irq = IRQ_GPIO,
+ };
+ //wmt_pn544_bi.addr = g_i2c_addr;
+
+ pr_debug(DRIVER_DESC ": %s\n", __func__);
+ r = get_board_info();
+ if (r < 0)
+ {
+ printk("%s no env!!\n", __func__);
+ return r;
+ }
+ wmt_pn544_bi.addr = g_i2c_addr;
+ adapter = i2c_get_adapter(/*WMT_PN544_I2C_CHANNEL*/g_i2c_adapter);
+ if (adapter == NULL) {
+ printk("can not get i2c adapter, client address error");
+ return -ENODEV;
+ }
+
+ pn544_client = i2c_new_device(adapter, &wmt_pn544_bi);
+ if ( pn544_client == NULL) {
+ printk("allocate i2c client failed");
+ return -ENOMEM;
+ }
+
+ i2c_put_adapter(adapter);
+
+ r = i2c_add_driver(&pn544_driver);
+ if (r) {
+ pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+ //return i2c_add_driver(&pn544_driver);
+}
+module_init(pn544_dev_init);
+
+static void __exit pn544_dev_exit(void)
+{
+ pr_info("Unloading pn544 driver\n");
+ i2c_del_driver(&pn544_driver);
+
+ i2c_unregister_device(pn544_client);
+}
+module_exit(pn544_dev_exit);
+
+MODULE_AUTHOR("Sylvain Fonteneau");
+MODULE_DESCRIPTION("NFC PN544 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
new file mode 100644
index 00000000..383133b2
--- /dev/null
+++ b/drivers/misc/pti.c
@@ -0,0 +1,996 @@
+/*
+ * pti.c - PTI driver for cJTAG data extraction
+ *
+ * Copyright (C) Intel 2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The PTI (Parallel Trace Interface) driver directs trace data routed from
+ * various parts in the system out through the Intel Penwell PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/pti.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define DRIVERNAME "pti"
+#define PCINAME "pciPTI"
+#define TTYNAME "ttyPTI"
+#define CHARNAME "pti"
+#define PTITTY_MINOR_START 0
+#define PTITTY_MINOR_NUM 2
+#define MAX_APP_IDS 16 /* 128 channel ids / u8 bit size */
+#define MAX_OS_IDS 16 /* 128 channel ids / u8 bit size */
+#define MAX_MODEM_IDS 16 /* 128 channel ids / u8 bit size */
+#define MODEM_BASE_ID 71 /* modem master ID address */
+#define CONTROL_ID 72 /* control master ID address */
+#define CONSOLE_ID 73 /* console master ID address */
+#define OS_BASE_ID 74 /* base OS master ID address */
+#define APP_BASE_ID 80 /* base App master ID address */
+#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */
+#define USER_COPY_SIZE 8192 /* 8Kb buffer for user space copy */
+#define APERTURE_14 0x3800000 /* offset to first OS write addr */
+#define APERTURE_LEN 0x400000 /* address length */
+
+struct pti_tty {
+ struct pti_masterchannel *mc;
+};
+
+struct pti_dev {
+ struct tty_port port;
+ unsigned long pti_addr;
+ unsigned long aperture_base;
+ void __iomem *pti_ioaddr;
+ u8 ia_app[MAX_APP_IDS];
+ u8 ia_os[MAX_OS_IDS];
+ u8 ia_modem[MAX_MODEM_IDS];
+};
+
+/*
+ * This protects access to ia_app, ia_os, and ia_modem,
+ * which keeps track of channels allocated in
+ * an aperture write id.
+ */
+static DEFINE_MUTEX(alloclock);
+
+static struct pci_device_id pci_ids[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
+ {0}
+};
+
+static struct tty_driver *pti_tty_driver;
+static struct pti_dev *drv_data;
+
+static unsigned int pti_console_channel;
+static unsigned int pti_control_channel;
+
+/**
+ * pti_write_to_aperture()- The private write function to PTI HW.
+ *
+ * @mc: The 'aperture'. It's part of a write address that holds
+ * a master and channel ID.
+ * @buf: Data being written to the HW that will ultimately be seen
+ * in a debugging tool (Fido, Lauterbach).
+ * @len: Size of buffer.
+ *
+ * Since each aperture is specified by a unique
+ * master/channel ID, no two processes will be writing
+ * to the same aperture at the same time so no lock is required. The
+ * PTI-Output agent will send these out in the order that they arrived, and
+ * thus, it will intermix these messages. The debug tool can then later
+ * regroup the appropriate message segments together reconstituting each
+ * message.
+ */
+static void pti_write_to_aperture(struct pti_masterchannel *mc,
+ u8 *buf,
+ int len)
+{
+ int dwordcnt;
+ int final;
+ int i;
+ u32 ptiword;
+ u32 __iomem *aperture;
+ u8 *p = buf;
+
+ /*
+ * calculate the aperture offset from the base using the master and
+ * channel id's.
+ */
+ aperture = drv_data->pti_ioaddr + (mc->master << 15)
+ + (mc->channel << 8);
+
+ dwordcnt = len >> 2;
+ final = len - (dwordcnt << 2); /* final = trailing bytes */
+ if (final == 0 && dwordcnt != 0) { /* always need a final dword */
+ final += 4;
+ dwordcnt--;
+ }
+
+ for (i = 0; i < dwordcnt; i++) {
+ ptiword = be32_to_cpu(*(u32 *)p);
+ p += 4;
+ iowrite32(ptiword, aperture);
+ }
+
+ aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */
+
+ ptiword = 0;
+ for (i = 0; i < final; i++)
+ ptiword |= *p++ << (24-(8*i));
+
+ iowrite32(ptiword, aperture);
+ return;
+}
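+/*
+ * Editor's worked example (not in the original source): for len = 10,
+ * dwordcnt = 10 >> 2 = 2 and final = 10 - 8 = 2, so two full dwords are
+ * written, then the 2 trailing bytes are packed MSB-first into the last
+ * dword written at aperture + PTI_LASTDWORD_DTS. For len = 8, final would
+ * be 0, so one dword is moved from the loop to the final DTS write
+ * (dwordcnt = 1, final = 4): the HW always needs a final dword to mark
+ * end-of-message.
+ */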
+
+/**
+ * pti_control_frame_built_and_sent()- control frame build and send function.
+ *
+ * @mc: The master / channel structure on which the function
+ * built a control frame.
+ * @thread_name: The thread name associated with the master / channel or
+ * 'NULL' if using the 'current' global variable.
+ *
+ * To be able to post process the PTI contents on host side, a control frame
+ * is added before sending any PTI content. So the host side knows on
+ * each PTI frame the name of the thread using a dedicated master / channel.
+ * The thread name is retrieved from 'current' global variable if 'thread_name'
+ * is 'NULL', else it is retrieved from 'thread_name' parameter.
+ * This function builds this frame and sends it to a master ID CONTROL_ID.
+ * The overhead is only 32 bytes since the driver only writes to HW
+ * in 32 byte chunks.
+ */
+static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
+ const char *thread_name)
+{
+ /*
+ * Since we access the comm member in current's task_struct, we only
+ * need to be as large as what 'comm' in that structure is.
+ */
+ char comm[TASK_COMM_LEN];
+ struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
+ .channel = 0};
+ const char *thread_name_p;
+ const char *control_format = "%3d %3d %s";
+ u8 control_frame[CONTROL_FRAME_LEN];
+
+ if (!thread_name) {
+ if (!in_interrupt())
+ get_task_comm(comm, current);
+ else
+ strncpy(comm, "Interrupt", TASK_COMM_LEN);
+
+ /* Absolutely ensure our buffer is zero terminated. */
+ comm[TASK_COMM_LEN-1] = 0;
+ thread_name_p = comm;
+ } else {
+ thread_name_p = thread_name;
+ }
+
+ mccontrol.channel = pti_control_channel;
+ pti_control_channel = (pti_control_channel + 1) & 0x7f;
+
+ snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master,
+ mc->channel, thread_name_p);
+ pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame));
+}
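+/*
+ * Editor's example (illustrative, not from the original source): for a
+ * thread "syslogd" writing on master 74, channel 5, the control frame is
+ * the string " 74   5 syslogd" (format "%3d %3d %s"), sent on master
+ * CONTROL_ID (72) with a control channel that rotates through 0..127.
+ */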
+
+/**
+ * pti_write_full_frame_to_aperture()- high level function to
+ * write to PTI.
+ *
+ * @mc: The 'aperture'. It's part of a write address that holds
+ * a master and channel ID.
+ * @buf: Data being written to the HW that will ultimately be seen
+ * in a debugging tool (Fido, Lauterbach).
+ * @len: Size of buffer.
+ *
+ * All threads sending data (either console, user space application, ...)
+ * are calling the high level function to write to PTI meaning that it is
+ * possible to add a control frame before sending the content.
+ */
+static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc,
+ const unsigned char *buf,
+ int len)
+{
+ pti_control_frame_built_and_sent(mc, NULL);
+ pti_write_to_aperture(mc, (u8 *)buf, len);
+}
+
+/**
+ * get_id()- Allocate a master and channel ID.
+ *
+ * @id_array: an array of bits representing what channel
+ * id's are allocated for writing.
+ * @max_ids: The max amount of available write IDs to use.
+ * @base_id: The starting SW channel ID, based on the Intel
+ * PTI arch.
+ * @thread_name: The thread name associated with the master / channel or
+ * 'NULL' if using the 'current' global variable.
+ *
+ * Returns:
+ * pti_masterchannel struct with master, channel ID address
+ * 0 for error
+ *
+ * Each bit in the arrays ia_app and ia_os correspond to a master and
+ * channel id. The bit is one if the id is taken and 0 if free. For
+ * every master there are 128 channel id's.
+ */
+static struct pti_masterchannel *get_id(u8 *id_array,
+ int max_ids,
+ int base_id,
+ const char *thread_name)
+{
+ struct pti_masterchannel *mc;
+ int i, j, mask;
+
+ mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL);
+ if (mc == NULL)
+ return NULL;
+
+ /* look for a byte with a free bit */
+ for (i = 0; i < max_ids; i++)
+ if (id_array[i] != 0xff)
+ break;
+ if (i == max_ids) {
+ kfree(mc);
+ return NULL;
+ }
+ /* find the bit in the 128 possible channel opportunities */
+ mask = 0x80;
+ for (j = 0; j < 8; j++) {
+ if ((id_array[i] & mask) == 0)
+ break;
+ mask >>= 1;
+ }
+
+ /* grab it */
+ id_array[i] |= mask;
+ mc->master = base_id;
+ mc->channel = ((i & 0xf)<<3) + j;
+ /* write new master Id / channel Id allocation to channel control */
+ pti_control_frame_built_and_sent(mc, thread_name);
+ return mc;
+}
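+/*
+ * Editor's worked example (not in the original source): with
+ * id_array = { 0xff, 0x40, ... }, byte 0 is full, so i = 1; the mask scan
+ * finds bit j = 0 free (0x40 leaves the MSB clear), grabs it with
+ * id_array[1] |= 0x80, and returns channel ((1 & 0xf) << 3) + 0 = 8.
+ * Channels therefore run 0..127 across the 16 bytes of each array.
+ */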
+
+/*
+ * The following three functions:
+ * pti_request_masterchannel(), pti_release_masterchannel()
+ * and pti_writedata() are an API for other kernel drivers to
+ * access PTI.
+ */
+
+/**
+ * pti_request_masterchannel()- Kernel API function used to allocate
+ * a master, channel ID address
+ * to write to PTI HW.
+ *
+ * @type: 0- request Application master, channel aperture ID
+ * write address.
+ * 1- request OS master, channel aperture ID write
+ * address.
+ * 2- request Modem master, channel aperture ID
+ * write address.
+ * Other values, error.
+ * @thread_name: The thread name associated with the master / channel or
+ * 'NULL' if using the 'current' global variable.
+ *
+ * Returns:
+ * pti_masterchannel struct
+ * 0 for error
+ */
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+ const char *thread_name)
+{
+ struct pti_masterchannel *mc;
+
+ mutex_lock(&alloclock);
+
+ switch (type) {
+
+ case 0:
+ mc = get_id(drv_data->ia_app, MAX_APP_IDS,
+ APP_BASE_ID, thread_name);
+ break;
+
+ case 1:
+ mc = get_id(drv_data->ia_os, MAX_OS_IDS,
+ OS_BASE_ID, thread_name);
+ break;
+
+ case 2:
+ mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS,
+ MODEM_BASE_ID, thread_name);
+ break;
+ default:
+ mc = NULL;
+ }
+
+ mutex_unlock(&alloclock);
+ return mc;
+}
+EXPORT_SYMBOL_GPL(pti_request_masterchannel);
+
+/**
+ * pti_release_masterchannel()- Kernel API function used to release
+ * a master, channel ID address
+ * used to write to PTI HW.
+ *
+ * @mc: master, channel aperture ID address to be released. This
+ * will de-allocate the structure via kfree().
+ */
+void pti_release_masterchannel(struct pti_masterchannel *mc)
+{
+ u8 master, channel, i;
+
+ mutex_lock(&alloclock);
+
+ if (mc) {
+ master = mc->master;
+ channel = mc->channel;
+
+ if (master == APP_BASE_ID) {
+ i = channel >> 3;
+ drv_data->ia_app[i] &= ~(0x80>>(channel & 0x7));
+ } else if (master == OS_BASE_ID) {
+ i = channel >> 3;
+ drv_data->ia_os[i] &= ~(0x80>>(channel & 0x7));
+ } else {
+ i = channel >> 3;
+ drv_data->ia_modem[i] &= ~(0x80>>(channel & 0x7));
+ }
+
+ kfree(mc);
+ }
+
+ mutex_unlock(&alloclock);
+}
+EXPORT_SYMBOL_GPL(pti_release_masterchannel);
+
+/**
+ * pti_writedata()- Kernel API function used to write trace
+ * debugging data to PTI HW.
+ *
+ * @mc: Master, channel aperture ID address to write to.
+ * Null value will return with no write occurring.
+ * @buf:	Trace debugging data to write to the PTI HW.
+ *		Null value will return with no write occurring.
+ * @count:	Size of buf. Value of 0 or a negative number will
+ *		return with no write occurring.
+ */
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count)
+{
+ /*
+ * since this function is exported, this is treated like an
+ * API function, thus, all parameters should
+ * be checked for validity.
+ */
+ if ((mc != NULL) && (buf != NULL) && (count > 0))
+ pti_write_to_aperture(mc, buf, count);
+ return;
+}
+EXPORT_SYMBOL_GPL(pti_writedata);
+
+/**
+ * pti_pci_remove()- Driver exit method to remove PTI from
+ * PCI bus.
+ * @pdev: variable containing pci info of PTI.
+ */
+static void __devexit pti_pci_remove(struct pci_dev *pdev)
+{
+ struct pti_dev *drv_data;
+
+ drv_data = pci_get_drvdata(pdev);
+ if (drv_data != NULL) {
+ pci_iounmap(pdev, drv_data->pti_ioaddr);
+ pci_set_drvdata(pdev, NULL);
+ kfree(drv_data);
+ pci_release_region(pdev, 1);
+ pci_disable_device(pdev);
+ }
+}
+
+/*
+ * for the tty_driver_*() basic function descriptions, see tty_driver.h.
+ * Specific header comments made for PTI-related specifics.
+ */
+
+/**
+ * pti_tty_driver_open()- Open an Application master, channel aperture
+ * ID to the PTI device via tty device.
+ *
+ * @tty: tty interface.
+ * @filp: filp interface passed to tty_port_open() call.
+ *
+ * Returns:
+ * int, 0 for success
+ * otherwise, fail value
+ *
+ * The main purpose of using the tty device interface is for
+ * each tty port to have a unique PTI write aperture. In an
+ * example use case, ttyPTI0 gets syslogd and an APP aperture
+ * ID and ttyPTI1 is where the n_tracesink ldisc hooks to route
+ * modem messages into PTI. Modem trace data does not have to
+ * go to ttyPTI1, but ttyPTI0 and ttyPTI1 do need to be distinct
+ * master IDs. These messages go through the PTI HW and out of
+ * the handheld platform and to the Fido/Lauterbach device.
+ */
+static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp)
+{
+ /*
+ * we actually want to allocate a new channel per open, per
+ * system arch. HW gives more than plenty channels for a single
+ * system task to have its own channel to write trace data. This
+ * also removes a locking requirement for the actual write
+ * procedure.
+ */
+ return tty_port_open(&drv_data->port, tty, filp);
+}
+
+/**
+ * pti_tty_driver_close()- close tty device and release Application
+ * master, channel aperture ID to the PTI device via tty device.
+ *
+ * @tty: tty interface.
+ * @filp: filp interface passed to tty_port_close() call.
+ *
+ * The main purpose of using the tty device interface is to route
+ * syslog daemon messages to the PTI HW and out of the handheld platform
+ * and to the Fido/Lauterbach device.
+ */
+static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
+{
+ tty_port_close(&drv_data->port, tty, filp);
+}
+
+/**
+ * pti_tty_install()- Used to set up specific master-channels
+ * to tty ports for organizational purposes when
+ * tracing is viewed from debugging tools.
+ *
+ * @driver: tty driver information.
+ * @tty: tty struct containing pti information.
+ *
+ * Returns:
+ * 0 for success
+ * otherwise, error
+ */
+static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ int idx = tty->index;
+ struct pti_tty *pti_tty_data;
+ int ret = tty_standard_install(driver, tty);
+
+ if (ret == 0) {
+ pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL);
+ if (pti_tty_data == NULL)
+ return -ENOMEM;
+
+ if (idx == PTITTY_MINOR_START)
+ pti_tty_data->mc = pti_request_masterchannel(0, NULL);
+ else
+ pti_tty_data->mc = pti_request_masterchannel(2, NULL);
+
+ if (pti_tty_data->mc == NULL) {
+ kfree(pti_tty_data);
+ return -ENXIO;
+ }
+ tty->driver_data = pti_tty_data;
+ }
+
+ return ret;
+}
+
+/**
+ * pti_tty_cleanup()- Used to de-allocate master-channel resources
+ * tied to tty's of this driver.
+ *
+ * @tty: tty struct containing pti information.
+ */
+static void pti_tty_cleanup(struct tty_struct *tty)
+{
+ struct pti_tty *pti_tty_data = tty->driver_data;
+ if (pti_tty_data == NULL)
+ return;
+ pti_release_masterchannel(pti_tty_data->mc);
+ kfree(pti_tty_data);
+ tty->driver_data = NULL;
+}
+
+/**
+ * pti_tty_driver_write()- Write trace debugging data through the tty
+ * interface to the PTI HW.
+ *
+ * @tty:	tty struct whose driver_data holds the master, channel
+ *		write ID.
+ * @buf:	trace data to be written.
+ * @len:	# of bytes to write.
+ *
+ * Returns:
+ * int, # of bytes written
+ * otherwise, error
+ */
+static int pti_tty_driver_write(struct tty_struct *tty,
+ const unsigned char *buf, int len)
+{
+ struct pti_tty *pti_tty_data = tty->driver_data;
+ if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) {
+ pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len);
+ return len;
+ }
+ /*
+ * we can't write to the pti hardware if the private driver_data
+ * and the mc address is not there.
+ */
+ else
+ return -EFAULT;
+}
+
+/**
+ * pti_tty_write_room()- Always returns 2048.
+ *
+ * @tty: contains tty info of the pti driver.
+ */
+static int pti_tty_write_room(struct tty_struct *tty)
+{
+ return 2048;
+}
+
+/**
+ * pti_char_open()- Open an Application master, channel aperture
+ * ID to the PTI device. Part of the misc device implementation.
+ *
+ * @inode: not used.
+ * @filp: Output- will have a masterchannel struct set containing
+ * the allocated application PTI aperture write address.
+ *
+ * Returns:
+ * int, 0 for success
+ * otherwise, a fail value
+ */
+static int pti_char_open(struct inode *inode, struct file *filp)
+{
+ struct pti_masterchannel *mc;
+
+ /*
+ * We really do want to fail immediately if
+ * pti_request_masterchannel() fails,
+ * before assigning the value to filp->private_data.
+ * Slightly easier to debug if this driver needs debugging.
+ */
+ mc = pti_request_masterchannel(0, NULL);
+ if (mc == NULL)
+ return -ENOMEM;
+ filp->private_data = mc;
+ return 0;
+}
+
+/**
+ * pti_char_release()- Close a char channel to the PTI device. Part
+ * of the misc device implementation.
+ *
+ * @inode: Not used in this implementation.
+ * @filp: Contains private_data that contains the master, channel
+ * ID to be released by the PTI device.
+ *
+ * Returns:
+ * always 0
+ */
+static int pti_char_release(struct inode *inode, struct file *filp)
+{
+ pti_release_masterchannel(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
+}
+
+/**
+ * pti_char_write()- Write trace debugging data through the char
+ * interface to the PTI HW. Part of the misc device implementation.
+ *
+ * @filp: Contains private data which is used to obtain
+ * master, channel write ID.
+ * @data: trace data to be written.
+ * @len: # of bytes to write.
+ * @ppose: Not used in this function implementation.
+ *
+ * Returns:
+ * int, # of bytes written
+ * otherwise, error value
+ *
+ * Notes: From side discussions with Alan Cox and experimenting
+ * with PTI debug HW like Nokia's Fido box and Lauterbach
+ * devices, 8192 byte write buffer used by USER_COPY_SIZE was
+ * deemed an appropriate size for this type of usage with
+ * debugging HW.
+ */
+static ssize_t pti_char_write(struct file *filp, const char __user *data,
+ size_t len, loff_t *ppose)
+{
+ struct pti_masterchannel *mc;
+ void *kbuf;
+ const char __user *tmp;
+ size_t size = USER_COPY_SIZE;
+ size_t n = 0;
+
+ tmp = data;
+ mc = filp->private_data;
+
+ kbuf = kmalloc(size, GFP_KERNEL);
+ if (kbuf == NULL) {
+ pr_err("%s(%d): buf allocation failed\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ do {
+ if (len - n > USER_COPY_SIZE)
+ size = USER_COPY_SIZE;
+ else
+ size = len - n;
+
+ if (copy_from_user(kbuf, tmp, size)) {
+ kfree(kbuf);
+ return n ? n : -EFAULT;
+ }
+
+ pti_write_to_aperture(mc, kbuf, size);
+ n += size;
+ tmp += size;
+
+ } while (len > n);
+
+ kfree(kbuf);
+ return len;
+}
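+/*
+ * Editor's note (not in the original source): for a write of len = 20000
+ * bytes, the loop above copies and forwards three chunks of 8192, 8192 and
+ * 3616 bytes, bounding kernel-side buffering at USER_COPY_SIZE regardless
+ * of how much userspace writes in a single call.
+ */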
+
+static const struct tty_operations pti_tty_driver_ops = {
+ .open = pti_tty_driver_open,
+ .close = pti_tty_driver_close,
+ .write = pti_tty_driver_write,
+ .write_room = pti_tty_write_room,
+ .install = pti_tty_install,
+ .cleanup = pti_tty_cleanup
+};
+
+static const struct file_operations pti_char_driver_ops = {
+ .owner = THIS_MODULE,
+ .write = pti_char_write,
+ .open = pti_char_open,
+ .release = pti_char_release,
+};
+
+static struct miscdevice pti_char_driver = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = CHARNAME,
+ .fops = &pti_char_driver_ops
+};
+
+/**
+ * pti_console_write()- Write to the console that has been acquired.
+ *
+ * @c: Not used in this implementation.
+ * @buf: Data to be written.
+ * @len: Length of buf.
+ */
+static void pti_console_write(struct console *c, const char *buf, unsigned len)
+{
+ static struct pti_masterchannel mc = {.master = CONSOLE_ID,
+ .channel = 0};
+
+ mc.channel = pti_console_channel;
+ pti_console_channel = (pti_console_channel + 1) & 0x7f;
+
+ pti_write_full_frame_to_aperture(&mc, buf, len);
+}
+
+/**
+ * pti_console_device()- Return the driver tty structure and set the
+ * associated index implementation.
+ *
+ * @c: Console device of the driver.
+ * @index: index associated with c.
+ *
+ * Returns:
+ * always value of pti_tty_driver structure when this function
+ * is called.
+ */
+static struct tty_driver *pti_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return pti_tty_driver;
+}
+
+/**
+ * pti_console_setup()- Initialize console variables used by the driver.
+ *
+ * @c: Not used.
+ * @opts: Not used.
+ *
+ * Returns:
+ * always 0.
+ */
+static int pti_console_setup(struct console *c, char *opts)
+{
+ pti_console_channel = 0;
+ pti_control_channel = 0;
+ return 0;
+}
+
+/*
+ * pti_console struct, used to capture OS printk()'s and shift
+ * out to the PTI device for debugging. This cannot be
+ * enabled upon boot because of the possibility of eating
+ * any serial console printk's (race condition discovered).
+ * The console should be enabled when the tty port is
+ * used for the first time. Since the primary purpose for
+ * the tty port is to hook up syslog to it, the tty port
+ * will be open for a really long time.
+ */
+static struct console pti_console = {
+ .name = TTYNAME,
+ .write = pti_console_write,
+ .device = pti_console_device,
+ .setup = pti_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = 0,
+};
+
+/**
+ * pti_port_activate()- Used to start/initialize any items upon
+ * first opening of tty_port().
+ *
+ * @port- The tty port number of the PTI device.
+ * @tty- The tty struct associated with this device.
+ *
+ * Returns:
+ * always returns 0
+ *
+ * Notes: The primary purpose of the PTI tty port 0 is to hook
+ * the syslog daemon to it; thus this port will be open for a
+ * very long time.
+ */
+static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ if (port->tty->index == PTITTY_MINOR_START)
+ console_start(&pti_console);
+ return 0;
+}
+
+/**
+ * pti_port_shutdown()- Used to stop/shutdown any items upon the
+ * last tty port close.
+ *
+ * @port- The tty port number of the PTI device.
+ *
+ * Notes: The primary purpose of the PTI tty port 0 is to hook
+ * the syslog daemon to it; thus this port will be open for a
+ * very long time.
+ */
+static void pti_port_shutdown(struct tty_port *port)
+{
+ if (port->tty->index == PTITTY_MINOR_START)
+ console_stop(&pti_console);
+}
+
+static const struct tty_port_operations tty_port_ops = {
+ .activate = pti_port_activate,
+ .shutdown = pti_port_shutdown,
+};
+
+/*
+ * Note the _probe() call sets everything up and ties the char and tty
+ * to successfully detecting the PTI device on the pci bus.
+ */
+
+/**
+ * pti_pci_probe()- Used to detect pti on the pci bus and set
+ * things up in the driver.
+ *
+ * @pdev- pci_dev struct values for pti.
+ * @ent- pci_device_id struct for pti driver.
+ *
+ * Returns:
+ * 0 for success
+ * otherwise, error
+ */
+static int __devinit pti_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int retval = -EINVAL;
+ int pci_bar = 1;
+
+ dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
+ __func__, __LINE__, pdev->vendor, pdev->device);
+
+ retval = misc_register(&pti_char_driver);
+ if (retval) {
+ pr_err("%s(%d): CHAR registration failed of pti driver\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+ return retval;
+ }
+
+ retval = pci_enable_device(pdev);
+ if (retval != 0) {
+ dev_err(&pdev->dev,
+ "%s: pci_enable_device() returned error %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+
+ if (drv_data == NULL) {
+ retval = -ENOMEM;
+ dev_err(&pdev->dev,
+ "%s(%d): kmalloc() returned NULL memory.\n",
+ __func__, __LINE__);
+ return retval;
+ }
+ drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
+
+ retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ if (retval != 0) {
+ dev_err(&pdev->dev,
+ "%s(%d): pci_request_region() returned error %d\n",
+ __func__, __LINE__, retval);
+ kfree(drv_data);
+ return retval;
+ }
+ drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
+ drv_data->pti_ioaddr =
+ ioremap_nocache((u32)drv_data->aperture_base,
+ APERTURE_LEN);
+ if (!drv_data->pti_ioaddr) {
+ pci_release_region(pdev, pci_bar);
+ retval = -ENOMEM;
+ kfree(drv_data);
+ return retval;
+ }
+
+ pci_set_drvdata(pdev, drv_data);
+
+ tty_port_init(&drv_data->port);
+ drv_data->port.ops = &tty_port_ops;
+
+ tty_register_device(pti_tty_driver, 0, &pdev->dev);
+ tty_register_device(pti_tty_driver, 1, &pdev->dev);
+
+ register_console(&pti_console);
+
+ return retval;
+}
+
+static struct pci_driver pti_pci_driver = {
+ .name = PCINAME,
+ .id_table = pci_ids,
+ .probe = pti_pci_probe,
+ .remove = pti_pci_remove,
+};
+
+/**
+ *
+ * pti_init()- Overall entry/init call to the pti driver.
+ * It starts the registration process with the kernel.
+ *
+ * Returns:
+ * int __init, 0 for success
+ * otherwise value is an error
+ *
+ */
+static int __init pti_init(void)
+{
+ int retval = -EINVAL;
+
+ /* First register module as tty device */
+
+ pti_tty_driver = alloc_tty_driver(PTITTY_MINOR_NUM);
+ if (pti_tty_driver == NULL) {
+ pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ pti_tty_driver->driver_name = DRIVERNAME;
+ pti_tty_driver->name = TTYNAME;
+ pti_tty_driver->major = 0;
+ pti_tty_driver->minor_start = PTITTY_MINOR_START;
+ pti_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ pti_tty_driver->subtype = SYSTEM_TYPE_SYSCONS;
+ pti_tty_driver->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
+ pti_tty_driver->init_termios = tty_std_termios;
+
+ tty_set_operations(pti_tty_driver, &pti_tty_driver_ops);
+
+ retval = tty_register_driver(pti_tty_driver);
+ if (retval) {
+ pr_err("%s(%d): TTY registration failed of pti driver\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+
+ pti_tty_driver = NULL;
+ return retval;
+ }
+
+ retval = pci_register_driver(&pti_pci_driver);
+
+ if (retval) {
+ pr_err("%s(%d): PCI registration failed of pti driver\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+
+ tty_unregister_driver(pti_tty_driver);
+ pr_err("%s(%d): Unregistering TTY part of pti driver\n",
+ __func__, __LINE__);
+ pti_tty_driver = NULL;
+ return retval;
+ }
+
+ return retval;
+}
+
+/**
+ * pti_exit()- Unregisters this module as a tty and pci driver.
+ */
+static void __exit pti_exit(void)
+{
+ int retval;
+
+ tty_unregister_device(pti_tty_driver, 0);
+ tty_unregister_device(pti_tty_driver, 1);
+
+ retval = tty_unregister_driver(pti_tty_driver);
+ if (retval) {
+ pr_err("%s(%d): TTY unregistration failed of pti driver\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+ }
+
+ pci_unregister_driver(&pti_pci_driver);
+
+ retval = misc_deregister(&pti_char_driver);
+ if (retval) {
+ pr_err("%s(%d): CHAR unregistration failed of pti driver\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+ }
+
+ unregister_console(&pti_console);
+ return;
+}
+
+module_init(pti_init);
+module_exit(pti_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ken Mills, Jay Freyensee");
+MODULE_DESCRIPTION("PTI Driver");
+
diff --git a/drivers/misc/rsa/Kconfig b/drivers/misc/rsa/Kconfig
new file mode 100755
index 00000000..30039d81
--- /dev/null
+++ b/drivers/misc/rsa/Kconfig
@@ -0,0 +1,6 @@
+menu "WMT RSA "
+config WMT_RSA
+ tristate "RSA"
+ help
+ This enables the RSA decode/encode
+endmenu
diff --git a/drivers/misc/rsa/Makefile b/drivers/misc/rsa/Makefile
new file mode 100755
index 00000000..be017d93
--- /dev/null
+++ b/drivers/misc/rsa/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for rsa drivers.
+#
+
+#obj-$(CONFIG_WMT_RSA) += bignum.o rsa_verify.o
+
+obj-y += bignum.o rsa_verify.o
+
diff --git a/drivers/misc/rsa/asn1.h b/drivers/misc/rsa/asn1.h
new file mode 100755
index 00000000..20ced912
--- /dev/null
+++ b/drivers/misc/rsa/asn1.h
@@ -0,0 +1,244 @@
+/**
+ * \file asn1.h
+ *
+ * \brief Generic ASN.1 parsing
+ *
+ * Copyright (C) 2006-2011, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_ASN1_H
+#define POLARSSL_ASN1_H
+
+//#include "config.h"
+
+//#if defined(POLARSSL_BIGNUM_C)
+#include "bignum.h"
+//#endif
+
+//#include <string.h>
+
+/**
+ * \addtogroup asn1_module
+ * \{
+ */
+
+/**
+ * \name ASN1 Error codes
+ * These error codes are OR'ed to X509 error codes for
+ * higher error granularity.
+ * ASN1 is a standard to specify data structures.
+ * \{
+ */
+#define POLARSSL_ERR_ASN1_OUT_OF_DATA -0x0014 /**< Out of data when parsing an ASN1 data structure. */
+#define POLARSSL_ERR_ASN1_UNEXPECTED_TAG -0x0016 /**< ASN1 tag was of an unexpected value. */
+#define POLARSSL_ERR_ASN1_INVALID_LENGTH -0x0018 /**< Error when trying to determine the length or invalid length. */
+#define POLARSSL_ERR_ASN1_LENGTH_MISMATCH -0x001A /**< Actual length differs from expected length. */
+#define POLARSSL_ERR_ASN1_INVALID_DATA -0x001C /**< Data is invalid. (not used) */
+#define POLARSSL_ERR_ASN1_MALLOC_FAILED -0x001E /**< Memory allocation failed */
+/* \} name */
+
+/**
+ * \name DER constants
+ * These constants correspond to the DER-encoded ASN.1 type tags.
+ * DER encoding uses hexadecimal representation.
+ * An example DER sequence is:\n
+ * - 0x02 -- tag indicating INTEGER
+ * - 0x01 -- length in octets
+ * - 0x05 -- value
+ * Such sequences are typically read into \c ::x509_buf.
+ * \{
+ */
+#define ASN1_BOOLEAN 0x01
+#define ASN1_INTEGER 0x02
+#define ASN1_BIT_STRING 0x03
+#define ASN1_OCTET_STRING 0x04
+#define ASN1_NULL 0x05
+#define ASN1_OID 0x06
+#define ASN1_UTF8_STRING 0x0C
+#define ASN1_SEQUENCE 0x10
+#define ASN1_SET 0x11
+#define ASN1_PRINTABLE_STRING 0x13
+#define ASN1_T61_STRING 0x14
+#define ASN1_IA5_STRING 0x16
+#define ASN1_UTC_TIME 0x17
+#define ASN1_GENERALIZED_TIME 0x18
+#define ASN1_UNIVERSAL_STRING 0x1C
+#define ASN1_BMP_STRING 0x1E
+#define ASN1_PRIMITIVE 0x00
+#define ASN1_CONSTRUCTED 0x20
+#define ASN1_CONTEXT_SPECIFIC 0x80
+/* \} name */
+/* \} addtogroup asn1_module */
+
+/** Returns the size of the binary string, without the trailing \\0 */
+#define OID_SIZE(x) (sizeof(x) - 1)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \name Functions to parse ASN.1 data structures
+ * \{
+ */
+
+/**
+ * Type-length-value structure that allows for ASN1 using DER.
+ */
+typedef struct _asn1_buf
+{
+ int tag; /**< ASN1 type, e.g. ASN1_UTF8_STRING. */
+ size_t len; /**< ASN1 length, e.g. in octets. */
+ unsigned char *p; /**< ASN1 data, e.g. in ASCII. */
+}
+asn1_buf;
+
+/**
+ * Container for ASN1 bit strings.
+ */
+typedef struct _asn1_bitstring
+{
+ size_t len; /**< ASN1 length, e.g. in octets. */
+ unsigned char unused_bits; /**< Number of unused bits at the end of the string */
+ unsigned char *p; /**< Raw ASN1 data for the bit string */
+}
+asn1_bitstring;
+
+/**
+ * Container for a sequence of ASN.1 items
+ */
+typedef struct _asn1_sequence
+{
+ asn1_buf buf; /**< Buffer containing the given ASN.1 item. */
+ struct _asn1_sequence *next; /**< The next entry in the sequence. */
+}
+asn1_sequence;
+
+/**
+ * Get the length of an ASN.1 element.
+ * Updates the pointer to immediately behind the length.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param len The variable that will receive the value
+ *
+ * \return 0 if successful, POLARSSL_ERR_ASN1_OUT_OF_DATA on reaching
+ * end of data, POLARSSL_ERR_ASN1_INVALID_LENGTH if length is
+ * unparseable.
+ */
+int asn1_get_len( unsigned char **p,
+ const unsigned char *end,
+ size_t *len );
+
+/**
+ * Get the tag and length of the tag. Check for the requested tag.
+ * Updates the pointer to immediately behind the tag and length.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param len The variable that will receive the length
+ * \param tag The expected tag
+ *
+ * \return 0 if successful, POLARSSL_ERR_ASN1_UNEXPECTED_TAG if tag did
+ * not match requested tag, or another specific ASN.1 error code.
+ */
+int asn1_get_tag( unsigned char **p,
+ const unsigned char *end,
+ size_t *len, int tag );
+
+/**
+ * Retrieve a boolean ASN.1 tag and its value.
+ * Updates the pointer to immediately behind the full tag.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param val The variable that will receive the value
+ *
+ * \return 0 if successful or a specific ASN.1 error code.
+ */
+int asn1_get_bool( unsigned char **p,
+ const unsigned char *end,
+ int *val );
+
+/**
+ * Retrieve an integer ASN.1 tag and its value.
+ * Updates the pointer to immediately behind the full tag.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param val The variable that will receive the value
+ *
+ * \return 0 if successful or a specific ASN.1 error code.
+ */
+int asn1_get_int( unsigned char **p,
+ const unsigned char *end,
+ int *val );
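+
+/*
+ * Editor's usage sketch (hypothetical caller, consistent with the DER
+ * example in the header comment above): parsing { 0x02, 0x01, 0x05 },
+ * the encoding of INTEGER 5:
+ *
+ *	unsigned char der[] = { 0x02, 0x01, 0x05 };
+ *	unsigned char *p = der;
+ *	int val;
+ *	if (asn1_get_int(&p, der + sizeof(der), &val) == 0)
+ *		;	// val == 5, p now points past the value
+ */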
+
+/**
+ * Retrieve a bitstring ASN.1 tag and its value.
+ * Updates the pointer to immediately behind the full tag.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param bs The variable that will receive the value
+ *
+ * \return 0 if successful or a specific ASN.1 error code.
+ */
+int asn1_get_bitstring( unsigned char **p, const unsigned char *end,
+ asn1_bitstring *bs);
+
+/**
+ * Parses and splits an ASN.1 "SEQUENCE OF <tag>"
+ * Updates the pointer to immediately behind the full sequence tag.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param cur First variable in the chain to fill
+ * \param tag Type of sequence
+ *
+ * \return 0 if successful or a specific ASN.1 error code.
+ */
+int asn1_get_sequence_of( unsigned char **p,
+ const unsigned char *end,
+ asn1_sequence *cur,
+ int tag);
+
+#if defined(POLARSSL_BIGNUM_C)
+/**
+ * Retrieve a MPI value from an integer ASN.1 tag.
+ * Updates the pointer to immediately behind the full tag.
+ *
+ * \param p The position in the ASN.1 data
+ * \param end End of data
+ * \param X The MPI that will receive the value
+ *
+ * \return 0 if successful or a specific ASN.1 or MPI error code.
+ */
+int asn1_get_mpi( unsigned char **p,
+ const unsigned char *end,
+ mpi *X );
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* asn1.h */
diff --git a/drivers/misc/rsa/base64.h b/drivers/misc/rsa/base64.h
new file mode 100755
index 00000000..355116d7
--- /dev/null
+++ b/drivers/misc/rsa/base64.h
@@ -0,0 +1,87 @@
+/**
+ * \file base64.h
+ *
+ * \brief RFC 1521 base64 encoding/decoding
+ *
+ * Copyright (C) 2006-2010, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_BASE64_H
+#define POLARSSL_BASE64_H
+
+//#include <string.h>
+
+#define POLARSSL_ERR_BASE64_BUFFER_TOO_SMALL -0x002A /**< Output buffer too small. */
+#define POLARSSL_ERR_BASE64_INVALID_CHARACTER -0x002C /**< Invalid character in input. */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief Encode a buffer into base64 format
+ *
+ * \param dst destination buffer
+ * \param dlen size of the buffer
+ * \param src source buffer
+ * \param slen amount of data to be encoded
+ *
+ * \return 0 if successful, or POLARSSL_ERR_BASE64_BUFFER_TOO_SMALL.
+ * *dlen is always updated to reflect the amount
+ * of data that has (or would have) been written.
+ *
+ * \note Call this function with *dlen = 0 to obtain the
+ * required buffer size in *dlen
+ */
+int base64_encode( unsigned char *dst, size_t *dlen,
+ const unsigned char *src, size_t slen );
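+
+/*
+ * Illustrative sketch of the two-call sizing pattern described in the note
+ * above (base64_decode() follows the same pattern); my_alloc() is a
+ * hypothetical allocator.
+ *
+ *     size_t dlen = 0;
+ *     base64_encode( NULL, &dlen, src, slen );  // query: dlen gets the size
+ *     dst = my_alloc( dlen );
+ *     if( dst == NULL || base64_encode( dst, &dlen, src, slen ) != 0 )
+ *         return( -1 );
+ */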
+
+/**
+ * \brief Decode a base64-formatted buffer
+ *
+ * \param dst destination buffer
+ * \param dlen size of the buffer
+ * \param src source buffer
+ * \param slen amount of data to be decoded
+ *
+ * \return 0 if successful, POLARSSL_ERR_BASE64_BUFFER_TOO_SMALL, or
+ * POLARSSL_ERR_BASE64_INVALID_CHARACTER if the input data is not
+ * correct. *dlen is always updated to reflect the amount
+ * of data that has (or would have) been written.
+ *
+ * \note Call this function with *dlen = 0 to obtain the
+ * required buffer size in *dlen
+ */
+int base64_decode( unsigned char *dst, size_t *dlen,
+ const unsigned char *src, size_t slen );
+
+/**
+ * \brief Checkup routine
+ *
+ * \return 0 if successful, or 1 if the test failed
+ */
+int base64_self_test( int verbose );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* base64.h */
diff --git a/drivers/misc/rsa/bignum.c b/drivers/misc/rsa/bignum.c
new file mode 100755
index 00000000..01809e11
--- /dev/null
+++ b/drivers/misc/rsa/bignum.c
@@ -0,0 +1,2132 @@
+/*
+ * Multi-precision integer library
+ *
+ * Copyright (C) 2006-2010, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+/*
+ * This MPI implementation is based on:
+ *
+ * http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
+ * http://www.stillhq.com/extracted/gnupg-api/mpi/
+ * http://math.libtomcrypt.com/files/tommath.pdf
+ */
+
+//#include "polarssl/config.h"
+
+#if 1
+
+//#include "polarssl/bn_mul.h"
+#include "bignum.h"
+#include "dhm.h"
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/backing-dev.h>
+#include <linux/compat.h>
+#include <linux/mount.h>
+#include <linux/blkpg.h>
+
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+
+#include <linux/types.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+
+#define ciL (sizeof(t_uint)) /* chars in limb */
+#define biL (ciL << 3) /* bits in limb */
+#define biH (ciL << 2) /* half limb size */
+
+/*
+ * Convert between bits/chars and number of limbs
+ */
+#define BITS_TO_LIMBS(i) (((i) + biL - 1) / biL)
+#define CHARS_TO_LIMBS(i) (((i) + ciL - 1) / ciL)
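+
+/*
+ * Worked example: with 32-bit limbs, ciL = 4 and biL = 32, so
+ * CHARS_TO_LIMBS(5) = (5 + 3) / 4 = 2 and BITS_TO_LIMBS(33) = (33 + 31) / 32 = 2.
+ */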
+
+/*
+ * Initialize one MPI
+ */
+void mpi_init( mpi *X )
+{
+ if( X == NULL )
+ return;
+
+ X->s = 1;
+ X->n = 0;
+ X->p = NULL;
+}
+
+/*
+ * Unallocate one MPI
+ */
+void mpi_free( mpi *X )
+{
+ if( X == NULL )
+ return;
+
+ if( X->p != NULL )
+ {
+ memset( X->p, 0, X->n * ciL );
+ vfree( X->p );
+ }
+
+ X->s = 1;
+ X->n = 0;
+ X->p = NULL;
+}
+
+/*
+ * Enlarge to the specified number of limbs
+ */
+int mpi_grow( mpi *X, size_t nblimbs )
+{
+ t_uint *p;
+
+ if( nblimbs > POLARSSL_MPI_MAX_LIMBS )
+ return( POLARSSL_ERR_MPI_MALLOC_FAILED );
+
+ if( X->n < nblimbs )
+ {
+ if( ( p = (t_uint *) vmalloc( nblimbs * ciL ) ) == NULL )
+ return( POLARSSL_ERR_MPI_MALLOC_FAILED );
+
+ memset( p, 0, nblimbs * ciL );
+
+ if( X->p != NULL )
+ {
+ memcpy( p, X->p, X->n * ciL );
+ memset( X->p, 0, X->n * ciL );
+ vfree( X->p );
+ }
+
+ X->n = nblimbs;
+ X->p = p;
+ }
+
+ return( 0 );
+}
+
+/*
+ * Copy the contents of Y into X
+ */
+int mpi_copy( mpi *X, const mpi *Y )
+{
+ int ret;
+ size_t i;
+
+ if( X == Y )
+ return( 0 );
+
+ for( i = Y->n - 1; i > 0; i-- )
+ if( Y->p[i] != 0 )
+ break;
+ i++;
+
+ X->s = Y->s;
+
+ MPI_CHK( mpi_grow( X, i ) );
+
+ memset( X->p, 0, X->n * ciL );
+ memcpy( X->p, Y->p, i * ciL );
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Swap the contents of X and Y
+ */
+void mpi_swap( mpi *X, mpi *Y )
+{
+ mpi T;
+
+ memcpy( &T, X, sizeof( mpi ) );
+ memcpy( X, Y, sizeof( mpi ) );
+ memcpy( Y, &T, sizeof( mpi ) );
+}
+
+/*
+ * Set value from integer
+ */
+int mpi_lset( mpi *X, t_sint z )
+{
+ int ret;
+
+ MPI_CHK( mpi_grow( X, 1 ) );
+ memset( X->p, 0, X->n * ciL );
+
+ X->p[0] = ( z < 0 ) ? -z : z;
+ X->s = ( z < 0 ) ? -1 : 1;
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Get a specific bit
+ */
+int mpi_get_bit( mpi *X, size_t pos )
+{
+ if( X->n * biL <= pos )
+ return( 0 );
+
+ return ( X->p[pos / biL] >> ( pos % biL ) ) & 0x01;
+}
+
+/*
+ * Set a bit to a specific value of 0 or 1
+ */
+int mpi_set_bit( mpi *X, size_t pos, unsigned char val )
+{
+ int ret = 0;
+ size_t off = pos / biL;
+ size_t idx = pos % biL;
+
+ if( val != 0 && val != 1 )
+ return POLARSSL_ERR_MPI_BAD_INPUT_DATA;
+
+ if( X->n * biL <= pos )
+ {
+ if( val == 0 )
+ return ( 0 );
+
+ MPI_CHK( mpi_grow( X, off + 1 ) );
+ }
+
+ X->p[off] = ( X->p[off] & ~( 0x01 << idx ) ) | ( val << idx );
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Return the number of least significant bits
+ */
+size_t mpi_lsb( const mpi *X )
+{
+ size_t i, j, count = 0;
+
+ for( i = 0; i < X->n; i++ )
+ for( j = 0; j < biL; j++, count++ )
+ if( ( ( X->p[i] >> j ) & 1 ) != 0 )
+ return( count );
+
+ return( 0 );
+}
+
+/*
+ * Return the number of most significant bits
+ */
+size_t mpi_msb( const mpi *X )
+{
+ size_t i, j;
+
+ for( i = X->n - 1; i > 0; i-- )
+ if( X->p[i] != 0 )
+ break;
+
+ for( j = biL; j > 0; j-- )
+ if( ( ( X->p[i] >> ( j - 1 ) ) & 1 ) != 0 )
+ break;
+
+ return( ( i * biL ) + j );
+}
+
+/*
+ * Return the total size in bytes
+ */
+size_t mpi_size( const mpi *X )
+{
+ return( ( mpi_msb( X ) + 7 ) >> 3 );
+}
+
+/*
+ * Convert an ASCII character to digit value
+ */
+static int mpi_get_digit( t_uint *d, int radix, char c )
+{
+ *d = 255;
+
+ if( c >= 0x30 && c <= 0x39 ) *d = c - 0x30;
+ if( c >= 0x41 && c <= 0x46 ) *d = c - 0x37;
+ if( c >= 0x61 && c <= 0x66 ) *d = c - 0x57;
+
+ if( *d >= (t_uint) radix )
+ return( POLARSSL_ERR_MPI_INVALID_CHARACTER );
+
+ return( 0 );
+}
+
+/*
+ * Import from an ASCII string
+ */
+int mpi_read_string( mpi *X, int radix, const char *s )
+{
+ int ret;
+ size_t i, j, slen, n;
+ t_uint d;
+ mpi T;
+
+ if( radix < 2 || radix > 16 )
+ return( POLARSSL_ERR_MPI_BAD_INPUT_DATA );
+
+ mpi_init( &T );
+
+ slen = strlen( s );
+
+ if( radix == 16 )
+ {
+ n = BITS_TO_LIMBS( slen << 2 );
+
+ MPI_CHK( mpi_grow( X, n ) );
+ MPI_CHK( mpi_lset( X, 0 ) );
+
+ for( i = slen, j = 0; i > 0; i--, j++ )
+ {
+ if( i == 1 && s[i - 1] == '-' )
+ {
+ X->s = -1;
+ break;
+ }
+
+ MPI_CHK( mpi_get_digit( &d, radix, s[i - 1] ) );
+ X->p[j / (2 * ciL)] |= d << ( (j % (2 * ciL)) << 2 );
+ }
+ }
+ else
+ {
+ MPI_CHK( mpi_lset( X, 0 ) );
+
+ for( i = 0; i < slen; i++ )
+ {
+ if( i == 0 && s[i] == '-' )
+ {
+ X->s = -1;
+ continue;
+ }
+
+ MPI_CHK( mpi_get_digit( &d, radix, s[i] ) );
+ MPI_CHK( mpi_mul_int( &T, X, radix ) );
+
+ if( X->s == 1 )
+ {
+ MPI_CHK( mpi_add_int( X, &T, d ) );
+ }
+ else
+ {
+ MPI_CHK( mpi_sub_int( X, &T, d ) );
+ }
+ }
+ }
+
+cleanup:
+
+ mpi_free( &T );
+
+ return( ret );
+}
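+
+/*
+ * Illustrative sketch: importing a hexadecimal constant.
+ *
+ *     mpi N;
+ *     mpi_init( &N );
+ *     if( mpi_read_string( &N, 16, "C0FFEE" ) != 0 )
+ *         return( -1 );
+ *     ...
+ *     mpi_free( &N );
+ */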
+
+/*
+ * Helper to write the digits high-order first
+ */
+static int mpi_write_hlp( mpi *X, int radix, char **p )
+{
+ int ret;
+ t_uint r;
+
+ if( radix < 2 || radix > 16 )
+ return( POLARSSL_ERR_MPI_BAD_INPUT_DATA );
+
+ MPI_CHK( mpi_mod_int( &r, X, radix ) );
+ MPI_CHK( mpi_div_int( X, NULL, X, radix ) );
+
+ if( mpi_cmp_int( X, 0 ) != 0 )
+ MPI_CHK( mpi_write_hlp( X, radix, p ) );
+
+ if( r < 10 )
+ *(*p)++ = (char)( r + 0x30 );
+ else
+ *(*p)++ = (char)( r + 0x37 );
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Export into an ASCII string
+ */
+int mpi_write_string( const mpi *X, int radix, char *s, size_t *slen )
+{
+ int ret = 0;
+ size_t n;
+ char *p;
+ mpi T;
+
+ if( radix < 2 || radix > 16 )
+ return( POLARSSL_ERR_MPI_BAD_INPUT_DATA );
+
+ n = mpi_msb( X );
+ if( radix >= 4 ) n >>= 1;
+ if( radix >= 16 ) n >>= 1;
+ n += 3;
+
+ if( *slen < n )
+ {
+ *slen = n;
+ return( POLARSSL_ERR_MPI_BUFFER_TOO_SMALL );
+ }
+
+ p = s;
+ mpi_init( &T );
+
+ if( X->s == -1 )
+ *p++ = '-';
+
+ if( radix == 16 )
+ {
+ int c;
+ size_t i, j, k;
+
+ for( i = X->n, k = 0; i > 0; i-- )
+ {
+ for( j = ciL; j > 0; j-- )
+ {
+ c = ( X->p[i - 1] >> ( ( j - 1 ) << 3) ) & 0xFF;
+
+ if( c == 0 && k == 0 && ( i + j + 3 ) != 0 )
+ continue;
+
+ p += sprintf( p, "%02X", c );
+ k = 1;
+ }
+ }
+ }
+ else
+ {
+ MPI_CHK( mpi_copy( &T, X ) );
+
+ if( T.s == -1 )
+ T.s = 1;
+
+ MPI_CHK( mpi_write_hlp( &T, radix, &p ) );
+ }
+
+ *p++ = '\0';
+ *slen = p - s;
+
+cleanup:
+
+ mpi_free( &T );
+
+ return( ret );
+}
+
+#if defined(POLARSSL_FS_IO)
+/*
+ * Read X from an opened file
+ */
+int mpi_read_file( mpi *X, int radix, FILE *fin )
+{
+ t_uint d;
+ size_t slen;
+ char *p;
+ /*
+ * Buffer should have space for (short) label and decimal formatted MPI,
+ * newline characters and '\0'
+ */
+ char s[ POLARSSL_MPI_READ_BUFFER_SIZE ];
+
+ memset( s, 0, sizeof( s ) );
+ if( fgets( s, sizeof( s ) - 1, fin ) == NULL )
+ return( POLARSSL_ERR_MPI_FILE_IO_ERROR );
+
+ slen = strlen( s );
+ if( slen == sizeof( s ) - 2 )
+ return( POLARSSL_ERR_MPI_BUFFER_TOO_SMALL );
+
+ if( s[slen - 1] == '\n' ) { slen--; s[slen] = '\0'; }
+ if( s[slen - 1] == '\r' ) { slen--; s[slen] = '\0'; }
+
+ p = s + slen;
+ while( --p >= s )
+ if( mpi_get_digit( &d, radix, *p ) != 0 )
+ break;
+
+ return( mpi_read_string( X, radix, p + 1 ) );
+}
+
+/*
+ * Write X into an opened file (or stdout if fout == NULL)
+ */
+int mpi_write_file( const char *p, const mpi *X, int radix, FILE *fout )
+{
+ int ret;
+ size_t n, slen, plen;
+ /*
+ * Buffer should have space for minus sign, hexified MPI and '\0'
+ */
+ char s[ 2 * POLARSSL_MPI_MAX_SIZE + 2 ];
+
+ n = sizeof( s );
+ memset( s, 0, n );
+ n -= 2;
+
+ MPI_CHK( mpi_write_string( X, radix, s, (size_t *) &n ) );
+
+ if( p == NULL ) p = "";
+
+ plen = strlen( p );
+ slen = strlen( s );
+ s[slen++] = '\r';
+ s[slen++] = '\n';
+
+ if( fout != NULL )
+ {
+ if( fwrite( p, 1, plen, fout ) != plen ||
+ fwrite( s, 1, slen, fout ) != slen )
+ return( POLARSSL_ERR_MPI_FILE_IO_ERROR );
+ }
+ else
+ printf( "%s%s", p, s );
+
+cleanup:
+
+ return( ret );
+}
+#endif /* POLARSSL_FS_IO */
+
+/*
+ * Import X from unsigned binary data, big endian
+ */
+int mpi_read_binary( mpi *X, const unsigned char *buf, size_t buflen )
+{
+ int ret;
+ size_t i, j, n;
+
+ for( n = 0; n < buflen; n++ )
+ if( buf[n] != 0 )
+ break;
+
+ MPI_CHK( mpi_grow( X, CHARS_TO_LIMBS( buflen - n ) ) );
+ MPI_CHK( mpi_lset( X, 0 ) );
+
+ for( i = buflen, j = 0; i > n; i--, j++ )
+ X->p[j / ciL] |= ((t_uint) buf[i - 1]) << ((j % ciL) << 3);
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Export X into unsigned binary data, big endian
+ */
+int mpi_write_binary( const mpi *X, unsigned char *buf, size_t buflen )
+{
+ size_t i, j, n;
+
+ n = mpi_size( X );
+
+ if( buflen < n )
+ return( POLARSSL_ERR_MPI_BUFFER_TOO_SMALL );
+
+ memset( buf, 0, buflen );
+
+ for( i = buflen - 1, j = 0; n > 0; i--, j++, n-- )
+ buf[i] = (unsigned char)( X->p[j / ciL] >> ((j % ciL) << 3) );
+
+ return( 0 );
+}
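+
+/*
+ * Illustrative sketch: mpi_read_binary() and mpi_write_binary() round-trip
+ * a big-endian buffer, e.g. when (de)serialising an RSA modulus; in/in_len
+ * are hypothetical and assumed not to exceed POLARSSL_MPI_MAX_SIZE.
+ *
+ *     unsigned char out[POLARSSL_MPI_MAX_SIZE];
+ *     MPI_CHK( mpi_read_binary( &X, in, in_len ) );
+ *     MPI_CHK( mpi_write_binary( &X, out, mpi_size( &X ) ) );
+ */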
+
+/*
+ * Left-shift: X <<= count
+ */
+int mpi_shift_l( mpi *X, size_t count )
+{
+ int ret;
+ size_t i, v0, t1;
+ t_uint r0 = 0, r1;
+
+ v0 = count / (biL );
+ t1 = count & (biL - 1);
+
+ i = mpi_msb( X ) + count;
+
+ if( X->n * biL < i )
+ MPI_CHK( mpi_grow( X, BITS_TO_LIMBS( i ) ) );
+
+ ret = 0;
+
+ /*
+ * shift by count / limb_size
+ */
+ if( v0 > 0 )
+ {
+ for( i = X->n; i > v0; i-- )
+ X->p[i - 1] = X->p[i - v0 - 1];
+
+ for( ; i > 0; i-- )
+ X->p[i - 1] = 0;
+ }
+
+ /*
+ * shift by count % limb_size
+ */
+ if( t1 > 0 )
+ {
+ for( i = v0; i < X->n; i++ )
+ {
+ r1 = X->p[i] >> (biL - t1);
+ X->p[i] <<= t1;
+ X->p[i] |= r0;
+ r0 = r1;
+ }
+ }
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Right-shift: X >>= count
+ */
+int mpi_shift_r( mpi *X, size_t count )
+{
+ size_t i, v0, v1;
+ t_uint r0 = 0, r1;
+
+ v0 = count / biL;
+ v1 = count & (biL - 1);
+
+ /*
+ * shift by count / limb_size
+ */
+ if( v0 > 0 )
+ {
+ for( i = 0; i < X->n - v0; i++ )
+ X->p[i] = X->p[i + v0];
+
+ for( ; i < X->n; i++ )
+ X->p[i] = 0;
+ }
+
+ /*
+ * shift by count % limb_size
+ */
+ if( v1 > 0 )
+ {
+ for( i = X->n; i > 0; i-- )
+ {
+ r1 = X->p[i - 1] << (biL - v1);
+ X->p[i - 1] >>= v1;
+ X->p[i - 1] |= r0;
+ r0 = r1;
+ }
+ }
+
+ return( 0 );
+}
+
+/*
+ * Compare unsigned values
+ */
+int mpi_cmp_abs( const mpi *X, const mpi *Y )
+{
+ size_t i, j;
+
+ for( i = X->n; i > 0; i-- )
+ if( X->p[i - 1] != 0 )
+ break;
+
+ for( j = Y->n; j > 0; j-- )
+ if( Y->p[j - 1] != 0 )
+ break;
+
+ if( i == 0 && j == 0 )
+ return( 0 );
+
+ if( i > j ) return( 1 );
+ if( j > i ) return( -1 );
+
+ for( ; i > 0; i-- )
+ {
+ if( X->p[i - 1] > Y->p[i - 1] ) return( 1 );
+ if( X->p[i - 1] < Y->p[i - 1] ) return( -1 );
+ }
+
+ return( 0 );
+}
+
+/*
+ * Compare signed values
+ */
+int mpi_cmp_mpi( const mpi *X, const mpi *Y )
+{
+ size_t i, j;
+
+ for( i = X->n; i > 0; i-- )
+ if( X->p[i - 1] != 0 )
+ break;
+
+ for( j = Y->n; j > 0; j-- )
+ if( Y->p[j - 1] != 0 )
+ break;
+
+ if( i == 0 && j == 0 )
+ return( 0 );
+
+ if( i > j ) return( X->s );
+ if( j > i ) return( -Y->s );
+
+ if( X->s > 0 && Y->s < 0 ) return( 1 );
+ if( Y->s > 0 && X->s < 0 ) return( -1 );
+
+ for( ; i > 0; i-- )
+ {
+ if( X->p[i - 1] > Y->p[i - 1] ) return( X->s );
+ if( X->p[i - 1] < Y->p[i - 1] ) return( -X->s );
+ }
+
+ return( 0 );
+}
+
+/*
+ * Compare signed values
+ */
+int mpi_cmp_int( const mpi *X, t_sint z )
+{
+ mpi Y;
+ t_uint p[1];
+
+ *p = ( z < 0 ) ? -z : z;
+ Y.s = ( z < 0 ) ? -1 : 1;
+ Y.n = 1;
+ Y.p = p;
+
+ return( mpi_cmp_mpi( X, &Y ) );
+}
+
+/*
+ * Unsigned addition: X = |A| + |B| (HAC 14.7)
+ */
+int mpi_add_abs( mpi *X, const mpi *A, const mpi *B )
+{
+ int ret;
+ size_t i, j;
+ t_uint *o, *p, c;
+
+ if( X == B )
+ {
+ const mpi *T = A; A = X; B = T;
+ }
+
+ if( X != A )
+ MPI_CHK( mpi_copy( X, A ) );
+
+ /*
+ * X should always be positive as a result of unsigned additions.
+ */
+ X->s = 1;
+
+ for( j = B->n; j > 0; j-- )
+ if( B->p[j - 1] != 0 )
+ break;
+
+ MPI_CHK( mpi_grow( X, j ) );
+
+ o = B->p; p = X->p; c = 0;
+
+ for( i = 0; i < j; i++, o++, p++ )
+ {
+ *p += c; c = ( *p < c );
+ *p += *o; c += ( *p < *o );
+ }
+
+ while( c != 0 )
+ {
+ if( i >= X->n )
+ {
+ MPI_CHK( mpi_grow( X, i + 1 ) );
+ p = X->p + i;
+ }
+
+ *p += c; c = ( *p < c ); i++;
+ }
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Helper for mpi subtraction
+ */
+static void mpi_sub_hlp( size_t n, t_uint *s, t_uint *d )
+{
+ size_t i;
+ t_uint c, z;
+
+ for( i = c = 0; i < n; i++, s++, d++ )
+ {
+ z = ( *d < c ); *d -= c;
+ c = ( *d < *s ) + z; *d -= *s;
+ }
+
+ while( c != 0 )
+ {
+ z = ( *d < c ); *d -= c;
+ c = z; i++; d++;
+ }
+}
+
+/*
+ * Unsigned subtraction: X = |A| - |B| (HAC 14.9)
+ */
+int mpi_sub_abs( mpi *X, const mpi *A, const mpi *B )
+{
+ mpi TB;
+ int ret;
+ size_t n;
+
+ if( mpi_cmp_abs( A, B ) < 0 )
+ return( POLARSSL_ERR_MPI_NEGATIVE_VALUE );
+
+ mpi_init( &TB );
+
+ if( X == B )
+ {
+ MPI_CHK( mpi_copy( &TB, B ) );
+ B = &TB;
+ }
+
+ if( X != A )
+ MPI_CHK( mpi_copy( X, A ) );
+
+ /*
+ * X should always be positive as a result of unsigned subtractions.
+ */
+ X->s = 1;
+
+ ret = 0;
+
+ for( n = B->n; n > 0; n-- )
+ if( B->p[n - 1] != 0 )
+ break;
+
+ mpi_sub_hlp( n, B->p, X->p );
+
+cleanup:
+
+ mpi_free( &TB );
+
+ return( ret );
+}
+
+/*
+ * Signed addition: X = A + B
+ */
+int mpi_add_mpi( mpi *X, const mpi *A, const mpi *B )
+{
+ int ret, s = A->s;
+
+ if( A->s * B->s < 0 )
+ {
+ if( mpi_cmp_abs( A, B ) >= 0 )
+ {
+ MPI_CHK( mpi_sub_abs( X, A, B ) );
+ X->s = s;
+ }
+ else
+ {
+ MPI_CHK( mpi_sub_abs( X, B, A ) );
+ X->s = -s;
+ }
+ }
+ else
+ {
+ MPI_CHK( mpi_add_abs( X, A, B ) );
+ X->s = s;
+ }
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Signed subtraction: X = A - B
+ */
+int mpi_sub_mpi( mpi *X, const mpi *A, const mpi *B )
+{
+ int ret, s = A->s;
+
+ if( A->s * B->s > 0 )
+ {
+ if( mpi_cmp_abs( A, B ) >= 0 )
+ {
+ MPI_CHK( mpi_sub_abs( X, A, B ) );
+ X->s = s;
+ }
+ else
+ {
+ MPI_CHK( mpi_sub_abs( X, B, A ) );
+ X->s = -s;
+ }
+ }
+ else
+ {
+ MPI_CHK( mpi_add_abs( X, A, B ) );
+ X->s = s;
+ }
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Signed addition: X = A + b
+ */
+int mpi_add_int( mpi *X, const mpi *A, t_sint b )
+{
+ mpi _B;
+ t_uint p[1];
+
+ p[0] = ( b < 0 ) ? -b : b;
+ _B.s = ( b < 0 ) ? -1 : 1;
+ _B.n = 1;
+ _B.p = p;
+
+ return( mpi_add_mpi( X, A, &_B ) );
+}
+
+/*
+ * Signed subtraction: X = A - b
+ */
+int mpi_sub_int( mpi *X, const mpi *A, t_sint b )
+{
+ mpi _B;
+ t_uint p[1];
+
+ p[0] = ( b < 0 ) ? -b : b;
+ _B.s = ( b < 0 ) ? -1 : 1;
+ _B.n = 1;
+ _B.p = p;
+
+ return( mpi_sub_mpi( X, A, &_B ) );
+}
+
+/*
+ * Helper for mpi multiplication
+ */
+static void mpi_mul_hlp( size_t i, t_uint *s, t_uint *d, t_uint b )
+{
+ t_uint c = 0, t = 0;
+
+#if defined(MULADDC_HUIT)
+ for( ; i >= 8; i -= 8 )
+ {
+ MULADDC_INIT
+ MULADDC_HUIT
+ MULADDC_STOP
+ }
+
+ for( ; i > 0; i-- )
+ {
+ MULADDC_INIT
+ MULADDC_CORE
+ MULADDC_STOP
+ }
+#else
+ for( ; i >= 16; i -= 16 )
+ {
+ MULADDC_INIT
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_STOP
+ }
+
+ for( ; i >= 8; i -= 8 )
+ {
+ MULADDC_INIT
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_CORE MULADDC_CORE
+ MULADDC_STOP
+ }
+
+ for( ; i > 0; i-- )
+ {
+ MULADDC_INIT
+ MULADDC_CORE
+ MULADDC_STOP
+ }
+#endif
+
+ t++; /* t is only used by some MULADDC implementations; touch it to avoid unused-variable warnings */
+
+ do {
+ *d += c; c = ( *d < c ); d++;
+ }
+ while( c != 0 );
+}
+/*
+ * Baseline multiplication: X = A * B (HAC 14.12)
+ */
+int mpi_mul_mpi( mpi *X, const mpi *A, const mpi *B )
+{
+ int ret;
+ size_t i, j;
+ mpi TDA, TB;
+
+ mpi_init( &TDA ); mpi_init( &TB );
+
+ if( X == A ) { MPI_CHK( mpi_copy( &TDA, A ) ); A = &TDA; }
+ if( X == B ) { MPI_CHK( mpi_copy( &TB, B ) ); B = &TB; }
+
+ for( i = A->n; i > 0; i-- )
+ if( A->p[i - 1] != 0 )
+ break;
+
+ for( j = B->n; j > 0; j-- )
+ if( B->p[j - 1] != 0 )
+ break;
+
+ MPI_CHK( mpi_grow( X, i + j ) );
+ MPI_CHK( mpi_lset( X, 0 ) );
+
+ for( i++; j > 0; j-- )
+ mpi_mul_hlp( i - 1, A->p, X->p + j - 1, B->p[j - 1] );
+
+ X->s = A->s * B->s;
+
+cleanup:
+
+ mpi_free( &TB ); mpi_free( &TDA );
+
+ return( ret );
+}
+
+/*
+ * Baseline multiplication: X = A * b
+ */
+int mpi_mul_int( mpi *X, const mpi *A, t_sint b )
+{
+ mpi _B;
+ t_uint p[1];
+
+ _B.s = 1;
+ _B.n = 1;
+ _B.p = p;
+ p[0] = b;
+
+ return( mpi_mul_mpi( X, A, &_B ) );
+}
+
+/*
+ * Division by mpi: A = Q * B + R (HAC 14.20)
+ */
+int mpi_div_mpi( mpi *Q, mpi *R, const mpi *A, const mpi *B )
+{
+ int ret;
+ size_t i, n, t, k;
+ mpi X, Y, Z, T1, T2;
+
+ if( mpi_cmp_int( B, 0 ) == 0 )
+ return( POLARSSL_ERR_MPI_DIVISION_BY_ZERO );
+
+ mpi_init( &X ); mpi_init( &Y ); mpi_init( &Z );
+ mpi_init( &T1 ); mpi_init( &T2 );
+
+ if( mpi_cmp_abs( A, B ) < 0 )
+ {
+ if( Q != NULL ) MPI_CHK( mpi_lset( Q, 0 ) );
+ if( R != NULL ) MPI_CHK( mpi_copy( R, A ) );
+ return( 0 );
+ }
+
+ MPI_CHK( mpi_copy( &X, A ) );
+ MPI_CHK( mpi_copy( &Y, B ) );
+ X.s = Y.s = 1;
+
+ MPI_CHK( mpi_grow( &Z, A->n + 2 ) );
+ MPI_CHK( mpi_lset( &Z, 0 ) );
+ MPI_CHK( mpi_grow( &T1, 2 ) );
+ MPI_CHK( mpi_grow( &T2, 3 ) );
+
+ k = mpi_msb( &Y ) % biL;
+ if( k < biL - 1 )
+ {
+ k = biL - 1 - k;
+ MPI_CHK( mpi_shift_l( &X, k ) );
+ MPI_CHK( mpi_shift_l( &Y, k ) );
+ }
+ else k = 0;
+
+ n = X.n - 1;
+ t = Y.n - 1;
+ mpi_shift_l( &Y, biL * (n - t) );
+
+ while( mpi_cmp_mpi( &X, &Y ) >= 0 )
+ {
+ Z.p[n - t]++;
+ mpi_sub_mpi( &X, &X, &Y );
+ }
+ mpi_shift_r( &Y, biL * (n - t) );
+
+ for( i = n; i > t ; i-- )
+ {
+ if( X.p[i] >= Y.p[t] )
+ Z.p[i - t - 1] = ~0;
+ else
+ {
+#if defined(POLARSSL_HAVE_LONGLONG)
+ t_udbl r;
+
+ r = (t_udbl) X.p[i] << biL;
+ r |= (t_udbl) X.p[i - 1];
+ r /= Y.p[t];
+ if( r > ((t_udbl) 1 << biL) - 1)
+ r = ((t_udbl) 1 << biL) - 1;
+
+ Z.p[i - t - 1] = (t_uint) r;
+#else
+ /*
+ * __udiv_qrnnd_c, from gmp/longlong.h
+ */
+ t_uint q0, q1, r0, r1;
+ t_uint d0, d1, d, m;
+
+ d = Y.p[t];
+ d0 = ( d << biH ) >> biH;
+ d1 = ( d >> biH );
+
+ q1 = X.p[i] / d1;
+ r1 = X.p[i] - d1 * q1;
+ r1 <<= biH;
+ r1 |= ( X.p[i - 1] >> biH );
+
+ m = q1 * d0;
+ if( r1 < m )
+ {
+ q1--, r1 += d;
+ while( r1 >= d && r1 < m )
+ q1--, r1 += d;
+ }
+ r1 -= m;
+
+ q0 = r1 / d1;
+ r0 = r1 - d1 * q0;
+ r0 <<= biH;
+ r0 |= ( X.p[i - 1] << biH ) >> biH;
+
+ m = q0 * d0;
+ if( r0 < m )
+ {
+ q0--, r0 += d;
+ while( r0 >= d && r0 < m )
+ q0--, r0 += d;
+ }
+ r0 -= m;
+
+ Z.p[i - t - 1] = ( q1 << biH ) | q0;
+#endif
+ }
+
+ Z.p[i - t - 1]++;
+ do
+ {
+ Z.p[i - t - 1]--;
+
+ MPI_CHK( mpi_lset( &T1, 0 ) );
+ T1.p[0] = (t < 1) ? 0 : Y.p[t - 1];
+ T1.p[1] = Y.p[t];
+ MPI_CHK( mpi_mul_int( &T1, &T1, Z.p[i - t - 1] ) );
+
+ MPI_CHK( mpi_lset( &T2, 0 ) );
+ T2.p[0] = (i < 2) ? 0 : X.p[i - 2];
+ T2.p[1] = (i < 1) ? 0 : X.p[i - 1];
+ T2.p[2] = X.p[i];
+ }
+ while( mpi_cmp_mpi( &T1, &T2 ) > 0 );
+
+ MPI_CHK( mpi_mul_int( &T1, &Y, Z.p[i - t - 1] ) );
+ MPI_CHK( mpi_shift_l( &T1, biL * (i - t - 1) ) );
+ MPI_CHK( mpi_sub_mpi( &X, &X, &T1 ) );
+
+ if( mpi_cmp_int( &X, 0 ) < 0 )
+ {
+ MPI_CHK( mpi_copy( &T1, &Y ) );
+ MPI_CHK( mpi_shift_l( &T1, biL * (i - t - 1) ) );
+ MPI_CHK( mpi_add_mpi( &X, &X, &T1 ) );
+ Z.p[i - t - 1]--;
+ }
+ }
+
+ if( Q != NULL )
+ {
+ mpi_copy( Q, &Z );
+ Q->s = A->s * B->s;
+ }
+
+ if( R != NULL )
+ {
+ mpi_shift_r( &X, k );
+ mpi_copy( R, &X );
+
+ R->s = A->s;
+ if( mpi_cmp_int( R, 0 ) == 0 )
+ R->s = 1;
+ }
+
+cleanup:
+
+ mpi_free( &X ); mpi_free( &Y ); mpi_free( &Z );
+ mpi_free( &T1 ); mpi_free( &T2 );
+
+ return( ret );
+}
+
+/*
+ * Division by int: A = Q * b + R
+ *
+ * Returns 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_DIVISION_BY_ZERO if b == 0
+ */
+int mpi_div_int( mpi *Q, mpi *R, const mpi *A, t_sint b )
+{
+ mpi _B;
+ t_uint p[1];
+
+ p[0] = ( b < 0 ) ? -b : b;
+ _B.s = ( b < 0 ) ? -1 : 1;
+ _B.n = 1;
+ _B.p = p;
+
+ return( mpi_div_mpi( Q, R, A, &_B ) );
+}
+
+/*
+ * Modulo: R = A mod B
+ */
+int mpi_mod_mpi( mpi *R, const mpi *A, const mpi *B )
+{
+ int ret;
+
+ if( mpi_cmp_int( B, 0 ) < 0 )
+ return POLARSSL_ERR_MPI_NEGATIVE_VALUE;
+
+ MPI_CHK( mpi_div_mpi( NULL, R, A, B ) );
+
+ while( mpi_cmp_int( R, 0 ) < 0 )
+ MPI_CHK( mpi_add_mpi( R, R, B ) );
+
+ while( mpi_cmp_mpi( R, B ) >= 0 )
+ MPI_CHK( mpi_sub_mpi( R, R, B ) );
+
+cleanup:
+
+ return( ret );
+}
+
+/*
+ * Modulo: r = A mod b
+ */
+int mpi_mod_int( t_uint *r, const mpi *A, t_sint b )
+{
+ size_t i;
+ t_uint x, y, z;
+
+ if( b == 0 )
+ return( POLARSSL_ERR_MPI_DIVISION_BY_ZERO );
+
+ if( b < 0 )
+ return POLARSSL_ERR_MPI_NEGATIVE_VALUE;
+
+ /*
+ * handle trivial cases
+ */
+ if( b == 1 )
+ {
+ *r = 0;
+ return( 0 );
+ }
+
+ if( b == 2 )
+ {
+ *r = A->p[0] & 1;
+ return( 0 );
+ }
+
+ /*
+ * general case
+ */
+ for( i = A->n, y = 0; i > 0; i-- )
+ {
+ x = A->p[i - 1];
+ y = ( y << biH ) | ( x >> biH );
+ z = y / b;
+ y -= z * b;
+
+ x <<= biH;
+ y = ( y << biH ) | ( x >> biH );
+ z = y / b;
+ y -= z * b;
+ }
+
+ /*
+ * If A is negative, then the current y represents a negative value.
+ * Flip it to the positive side.
+ */
+ if( A->s < 0 && y != 0 )
+ y = b - y;
+
+ *r = y;
+
+ return( 0 );
+}
+
+/*
+ * Fast Montgomery initialization (thanks to Tom St Denis)
+ */
+static void mpi_montg_init( t_uint *mm, const mpi *N )
+{
+ t_uint x, m0 = N->p[0];
+
+ x = m0;
+ x += ( ( m0 + 2 ) & 4 ) << 1;
+ x *= ( 2 - ( m0 * x ) );
+
+ if( biL >= 16 ) x *= ( 2 - ( m0 * x ) );
+ if( biL >= 32 ) x *= ( 2 - ( m0 * x ) );
+ if( biL >= 64 ) x *= ( 2 - ( m0 * x ) );
+
+ *mm = ~x + 1;
+}
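+
+/*
+ * The update above is a Newton/Hensel-style iteration: the seed is
+ * m0^-1 mod 2^4, and each x *= ( 2 - m0 * x ) step doubles the number of
+ * correct low-order bits, so the conditional steps extend the inverse to
+ * 16-, 32- and 64-bit limbs; *mm finally holds -m0^-1 mod 2^biL, which is
+ * what Montgomery reduction needs.
+ */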
+
+/*
+ * Montgomery multiplication: A = A * B * R^-1 mod N (HAC 14.36)
+ */
+static void mpi_montmul( mpi *A, const mpi *B, const mpi *N, t_uint mm, const mpi *T )
+{
+ size_t i, n, m;
+ t_uint u0, u1, *d;
+
+ memset( T->p, 0, T->n * ciL );
+
+ d = T->p;
+ n = N->n;
+ m = ( B->n < n ) ? B->n : n;
+
+ for( i = 0; i < n; i++ )
+ {
+ /*
+ * T = (T + u0*B + u1*N) / 2^biL
+ */
+ u0 = A->p[i];
+ u1 = ( d[0] + u0 * B->p[0] ) * mm;
+
+ mpi_mul_hlp( m, B->p, d, u0 );
+ mpi_mul_hlp( n, N->p, d, u1 );
+
+ *d++ = u0; d[n + 1] = 0;
+ }
+
+ memcpy( A->p, d, (n + 1) * ciL );
+
+ if( mpi_cmp_abs( A, N ) >= 0 )
+ mpi_sub_hlp( n, N->p, A->p );
+ else
+ /* prevent timing attacks */
+ mpi_sub_hlp( n, A->p, T->p );
+}
+
+/*
+ * Montgomery reduction: A = A * R^-1 mod N
+ */
+static void mpi_montred( mpi *A, const mpi *N, t_uint mm, const mpi *T )
+{
+ t_uint z = 1;
+ mpi U;
+
+ U.n = U.s = z;
+ U.p = &z;
+
+ mpi_montmul( A, &U, N, mm, T );
+}
+
+/*
+ * Sliding-window exponentiation: X = A^E mod N (HAC 14.85)
+ */
+int mpi_exp_mod( mpi *X, const mpi *A, const mpi *E, const mpi *N, mpi *_RR )
+{
+ int ret;
+ size_t wbits, wsize, one = 1;
+ size_t i, j, nblimbs;
+ size_t bufsize, nbits;
+ t_uint ei, mm, state;
+ mpi RR, T, W[ 2 << POLARSSL_MPI_WINDOW_SIZE ];
+
+ if( mpi_cmp_int( N, 0 ) < 0 || ( N->p[0] & 1 ) == 0 )
+ return( POLARSSL_ERR_MPI_BAD_INPUT_DATA );
+
+ /*
+ * Init temps and window size
+ */
+ mpi_montg_init( &mm, N );
+ mpi_init( &RR ); mpi_init( &T );
+ memset( W, 0, sizeof( W ) );
+
+ i = mpi_msb( E );
+
+ wsize = ( i > 671 ) ? 6 : ( i > 239 ) ? 5 :
+ ( i > 79 ) ? 4 : ( i > 23 ) ? 3 : 1;
+
+ if( wsize > POLARSSL_MPI_WINDOW_SIZE )
+ wsize = POLARSSL_MPI_WINDOW_SIZE;
+
+ j = N->n + 1;
+ MPI_CHK( mpi_grow( X, j ) );
+ MPI_CHK( mpi_grow( &W[1], j ) );
+ MPI_CHK( mpi_grow( &T, j * 2 ) );
+
+ /*
+ * If 1st call, pre-compute R^2 mod N
+ */
+ if( _RR == NULL || _RR->p == NULL )
+ {
+ MPI_CHK( mpi_lset( &RR, 1 ) );
+ MPI_CHK( mpi_shift_l( &RR, N->n * 2 * biL ) );
+ MPI_CHK( mpi_mod_mpi( &RR, &RR, N ) );
+
+ if( _RR != NULL )
+ memcpy( _RR, &RR, sizeof( mpi ) );
+ }
+ else
+ memcpy( &RR, _RR, sizeof( mpi ) );
+
+ /*
+ * W[1] = A * R^2 * R^-1 mod N = A * R mod N
+ */
+ if( mpi_cmp_mpi( A, N ) >= 0 )
+ mpi_mod_mpi( &W[1], A, N );
+ else mpi_copy( &W[1], A );
+
+ mpi_montmul( &W[1], &RR, N, mm, &T );
+
+ /*
+ * X = R^2 * R^-1 mod N = R mod N
+ */
+ MPI_CHK( mpi_copy( X, &RR ) );
+ mpi_montred( X, N, mm, &T );
+
+ if( wsize > 1 )
+ {
+ /*
+ * W[1 << (wsize - 1)] = W[1] ^ (wsize - 1)
+ */
+ j = one << (wsize - 1);
+
+ MPI_CHK( mpi_grow( &W[j], N->n + 1 ) );
+ MPI_CHK( mpi_copy( &W[j], &W[1] ) );
+
+ for( i = 0; i < wsize - 1; i++ )
+ mpi_montmul( &W[j], &W[j], N, mm, &T );
+
+ /*
+ * W[i] = W[i - 1] * W[1]
+ */
+ for( i = j + 1; i < (one << wsize); i++ )
+ {
+ MPI_CHK( mpi_grow( &W[i], N->n + 1 ) );
+ MPI_CHK( mpi_copy( &W[i], &W[i - 1] ) );
+
+ mpi_montmul( &W[i], &W[1], N, mm, &T );
+ }
+ }
+
+ nblimbs = E->n;
+ bufsize = 0;
+ nbits = 0;
+ wbits = 0;
+ state = 0;
+
+ while( 1 )
+ {
+ if( bufsize == 0 )
+ {
+ if( nblimbs-- == 0 )
+ break;
+
+ bufsize = sizeof( t_uint ) << 3;
+ }
+
+ bufsize--;
+
+ ei = (E->p[nblimbs] >> bufsize) & 1;
+
+ /*
+ * skip leading 0s
+ */
+ if( ei == 0 && state == 0 )
+ continue;
+
+ if( ei == 0 && state == 1 )
+ {
+ /*
+ * out of window, square X
+ */
+ mpi_montmul( X, X, N, mm, &T );
+ continue;
+ }
+
+ /*
+ * add ei to current window
+ */
+ state = 2;
+
+ nbits++;
+ wbits |= (ei << (wsize - nbits));
+
+ if( nbits == wsize )
+ {
+ /*
+ * X = X^wsize R^-1 mod N
+ */
+ for( i = 0; i < wsize; i++ )
+ mpi_montmul( X, X, N, mm, &T );
+
+ /*
+ * X = X * W[wbits] R^-1 mod N
+ */
+ mpi_montmul( X, &W[wbits], N, mm, &T );
+
+ state--;
+ nbits = 0;
+ wbits = 0;
+ }
+ }
+
+ /*
+ * process the remaining bits
+ */
+ for( i = 0; i < nbits; i++ )
+ {
+ mpi_montmul( X, X, N, mm, &T );
+
+ wbits <<= 1;
+
+ if( (wbits & (one << wsize)) != 0 )
+ mpi_montmul( X, &W[1], N, mm, &T );
+ }
+
+ /*
+ * X = A^E * R * R^-1 mod N = A^E mod N
+ */
+ mpi_montred( X, N, mm, &T );
+
+cleanup:
+
+ for( i = (one << (wsize - 1)); i < (one << wsize); i++ )
+ mpi_free( &W[i] );
+
+ mpi_free( &W[1] ); mpi_free( &T );
+
+ if( _RR == NULL )
+ mpi_free( &RR );
+
+ return( ret );
+}
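+
+/*
+ * Illustrative sketch: the _RR parameter caches R^2 mod N between calls
+ * with the same modulus, the common case for repeated RSA operations
+ * (X1/A1/X2/A2 are hypothetical operands).
+ *
+ *     mpi RR;
+ *     mpi_init( &RR );                       // RR.p == NULL on first use
+ *     MPI_CHK( mpi_exp_mod( &X1, &A1, &E, &N, &RR ) );  // computes R^2
+ *     MPI_CHK( mpi_exp_mod( &X2, &A2, &E, &N, &RR ) );  // reuses it
+ *     mpi_free( &RR );
+ */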
+
+/*
+ * Greatest common divisor: G = gcd(A, B) (HAC 14.54)
+ */
+int mpi_gcd( mpi *G, const mpi *A, const mpi *B )
+{
+ int ret;
+ size_t lz, lzt;
+ mpi TG, TDA, TB;
+
+ mpi_init( &TG ); mpi_init( &TDA ); mpi_init( &TB );
+
+ MPI_CHK( mpi_copy( &TDA, A ) );
+ MPI_CHK( mpi_copy( &TB, B ) );
+
+ lz = mpi_lsb( &TDA );
+ lzt = mpi_lsb( &TB );
+
+ if ( lzt < lz )
+ lz = lzt;
+
+ MPI_CHK( mpi_shift_r( &TDA, lz ) );
+ MPI_CHK( mpi_shift_r( &TB, lz ) );
+
+ TDA.s = TB.s = 1;
+
+ while( mpi_cmp_int( &TDA, 0 ) != 0 )
+ {
+ MPI_CHK( mpi_shift_r( &TDA, mpi_lsb( &TDA ) ) );
+ MPI_CHK( mpi_shift_r( &TB, mpi_lsb( &TB ) ) );
+
+ if( mpi_cmp_mpi( &TDA, &TB ) >= 0 )
+ {
+ MPI_CHK( mpi_sub_abs( &TDA, &TDA, &TB ) );
+ MPI_CHK( mpi_shift_r( &TDA, 1 ) );
+ }
+ else
+ {
+ MPI_CHK( mpi_sub_abs( &TB, &TB, &TDA ) );
+ MPI_CHK( mpi_shift_r( &TB, 1 ) );
+ }
+ }
+
+ MPI_CHK( mpi_shift_l( &TB, lz ) );
+ MPI_CHK( mpi_copy( G, &TB ) );
+
+cleanup:
+
+ mpi_free( &TG ); mpi_free( &TDA ); mpi_free( &TB );
+
+ return ret;
+}
+
+int mpi_fill_random( mpi *X, size_t size,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng )
+{
+ int ret;
+
+ MPI_CHK( mpi_grow( X, CHARS_TO_LIMBS( size ) ) );
+ MPI_CHK( mpi_lset( X, 0 ) );
+
+ MPI_CHK( f_rng( p_rng, (unsigned char *) X->p, size ) );
+
+cleanup:
+ return( ret );
+}
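+
+/*
+ * Illustrative sketch of an f_rng callback with the expected signature; a
+ * real implementation should draw from a proper entropy source, for example
+ * the kernel's get_random_bytes().
+ *
+ *     static int my_rng( void *p_rng, unsigned char *buf, size_t len )
+ *     {
+ *         get_random_bytes( buf, len );
+ *         return( 0 );
+ *     }
+ *
+ *     MPI_CHK( mpi_fill_random( &X, 32, my_rng, NULL ) );
+ */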
+
+#if defined(POLARSSL_GENPRIME)
+
+/*
+ * Modular inverse: X = A^-1 mod N (HAC 14.61 / 14.64)
+ */
+int mpi_inv_mod( mpi *X, const mpi *A, const mpi *N )
+{
+ int ret;
+ mpi G, TDA, TU, U1, U2, TB, TV, V1, V2;
+
+ if( mpi_cmp_int( N, 0 ) <= 0 )
+ return( POLARSSL_ERR_MPI_BAD_INPUT_DATA );
+
+ mpi_init( &TDA ); mpi_init( &TU ); mpi_init( &U1 ); mpi_init( &U2 );
+ mpi_init( &G ); mpi_init( &TB ); mpi_init( &TV );
+ mpi_init( &V1 ); mpi_init( &V2 );
+
+ MPI_CHK( mpi_gcd( &G, A, N ) );
+
+ if( mpi_cmp_int( &G, 1 ) != 0 )
+ {
+ ret = POLARSSL_ERR_MPI_NOT_ACCEPTABLE;
+ goto cleanup;
+ }
+
+ MPI_CHK( mpi_mod_mpi( &TDA, A, N ) );
+ MPI_CHK( mpi_copy( &TU, &TDA ) );
+ MPI_CHK( mpi_copy( &TB, N ) );
+ MPI_CHK( mpi_copy( &TV, N ) );
+
+ MPI_CHK( mpi_lset( &U1, 1 ) );
+ MPI_CHK( mpi_lset( &U2, 0 ) );
+ MPI_CHK( mpi_lset( &V1, 0 ) );
+ MPI_CHK( mpi_lset( &V2, 1 ) );
+
+ do
+ {
+ while( ( TU.p[0] & 1 ) == 0 )
+ {
+ MPI_CHK( mpi_shift_r( &TU, 1 ) );
+
+ if( ( U1.p[0] & 1 ) != 0 || ( U2.p[0] & 1 ) != 0 )
+ {
+ MPI_CHK( mpi_add_mpi( &U1, &U1, &TB ) );
+ MPI_CHK( mpi_sub_mpi( &U2, &U2, &TDA ) );
+ }
+
+ MPI_CHK( mpi_shift_r( &U1, 1 ) );
+ MPI_CHK( mpi_shift_r( &U2, 1 ) );
+ }
+
+ while( ( TV.p[0] & 1 ) == 0 )
+ {
+ MPI_CHK( mpi_shift_r( &TV, 1 ) );
+
+ if( ( V1.p[0] & 1 ) != 0 || ( V2.p[0] & 1 ) != 0 )
+ {
+ MPI_CHK( mpi_add_mpi( &V1, &V1, &TB ) );
+ MPI_CHK( mpi_sub_mpi( &V2, &V2, &TDA ) );
+ }
+
+ MPI_CHK( mpi_shift_r( &V1, 1 ) );
+ MPI_CHK( mpi_shift_r( &V2, 1 ) );
+ }
+
+ if( mpi_cmp_mpi( &TU, &TV ) >= 0 )
+ {
+ MPI_CHK( mpi_sub_mpi( &TU, &TU, &TV ) );
+ MPI_CHK( mpi_sub_mpi( &U1, &U1, &V1 ) );
+ MPI_CHK( mpi_sub_mpi( &U2, &U2, &V2 ) );
+ }
+ else
+ {
+ MPI_CHK( mpi_sub_mpi( &TV, &TV, &TU ) );
+ MPI_CHK( mpi_sub_mpi( &V1, &V1, &U1 ) );
+ MPI_CHK( mpi_sub_mpi( &V2, &V2, &U2 ) );
+ }
+ }
+ while( mpi_cmp_int( &TU, 0 ) != 0 );
+
+ while( mpi_cmp_int( &V1, 0 ) < 0 )
+ MPI_CHK( mpi_add_mpi( &V1, &V1, N ) );
+
+ while( mpi_cmp_mpi( &V1, N ) >= 0 )
+ MPI_CHK( mpi_sub_mpi( &V1, &V1, N ) );
+
+ MPI_CHK( mpi_copy( X, &V1 ) );
+
+cleanup:
+
+ mpi_free( &TDA ); mpi_free( &TU ); mpi_free( &U1 ); mpi_free( &U2 );
+ mpi_free( &G ); mpi_free( &TB ); mpi_free( &TV );
+ mpi_free( &V1 ); mpi_free( &V2 );
+
+ return( ret );
+}
+
+static const int small_prime[] =
+{
+ 3, 5, 7, 11, 13, 17, 19, 23,
+ 29, 31, 37, 41, 43, 47, 53, 59,
+ 61, 67, 71, 73, 79, 83, 89, 97,
+ 101, 103, 107, 109, 113, 127, 131, 137,
+ 139, 149, 151, 157, 163, 167, 173, 179,
+ 181, 191, 193, 197, 199, 211, 223, 227,
+ 229, 233, 239, 241, 251, 257, 263, 269,
+ 271, 277, 281, 283, 293, 307, 311, 313,
+ 317, 331, 337, 347, 349, 353, 359, 367,
+ 373, 379, 383, 389, 397, 401, 409, 419,
+ 421, 431, 433, 439, 443, 449, 457, 461,
+ 463, 467, 479, 487, 491, 499, 503, 509,
+ 521, 523, 541, 547, 557, 563, 569, 571,
+ 577, 587, 593, 599, 601, 607, 613, 617,
+ 619, 631, 641, 643, 647, 653, 659, 661,
+ 673, 677, 683, 691, 701, 709, 719, 727,
+ 733, 739, 743, 751, 757, 761, 769, 773,
+ 787, 797, 809, 811, 821, 823, 827, 829,
+ 839, 853, 857, 859, 863, 877, 881, 883,
+ 887, 907, 911, 919, 929, 937, 941, 947,
+ 953, 967, 971, 977, 983, 991, 997, -103
+};
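+
+/*
+ * The trailing -103 is a sentinel: the trial-division loop below only walks
+ * the table while small_prime[i] > 0, so any negative entry terminates it.
+ */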
+
+/*
+ * Miller-Rabin primality test (HAC 4.24)
+ */
+int mpi_is_prime( mpi *X,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng )
+{
+ int ret, xs;
+ size_t i, j, n, s;
+ mpi W, R, T, A, RR;
+
+ if( mpi_cmp_int( X, 0 ) == 0 ||
+ mpi_cmp_int( X, 1 ) == 0 )
+ return( POLARSSL_ERR_MPI_NOT_ACCEPTABLE );
+
+ if( mpi_cmp_int( X, 2 ) == 0 )
+ return( 0 );
+
+ mpi_init( &W ); mpi_init( &R ); mpi_init( &T ); mpi_init( &A );
+ mpi_init( &RR );
+
+ xs = X->s; X->s = 1;
+
+ /*
+ * test trivial factors first
+ */
+ if( ( X->p[0] & 1 ) == 0 )
+ {
+ X->s = xs; /* restore the caller's sign before the early return */
+ return( POLARSSL_ERR_MPI_NOT_ACCEPTABLE );
+ }
+
+ for( i = 0; small_prime[i] > 0; i++ )
+ {
+ t_uint r;
+
+ if( mpi_cmp_int( X, small_prime[i] ) <= 0 )
+ {
+ X->s = xs;
+ return( 0 );
+ }
+
+ MPI_CHK( mpi_mod_int( &r, X, small_prime[i] ) );
+
+ if( r == 0 )
+ {
+ X->s = xs;
+ return( POLARSSL_ERR_MPI_NOT_ACCEPTABLE );
+ }
+ }
+
+ /*
+ * W = |X| - 1
+ * R = W >> lsb( W )
+ */
+ MPI_CHK( mpi_sub_int( &W, X, 1 ) );
+ s = mpi_lsb( &W );
+ MPI_CHK( mpi_copy( &R, &W ) );
+ MPI_CHK( mpi_shift_r( &R, s ) );
+
+ i = mpi_msb( X );
+ /*
+ * HAC, table 4.4
+ */
+ n = ( ( i >= 1300 ) ? 2 : ( i >= 850 ) ? 3 :
+ ( i >= 650 ) ? 4 : ( i >= 350 ) ? 8 :
+ ( i >= 250 ) ? 12 : ( i >= 150 ) ? 18 : 27 );
+
+ for( i = 0; i < n; i++ )
+ {
+ /*
+ * pick a random A, 1 < A < |X| - 1
+ */
+ MPI_CHK( mpi_fill_random( &A, X->n * ciL, f_rng, p_rng ) );
+
+ if( mpi_cmp_mpi( &A, &W ) >= 0 )
+ {
+ j = mpi_msb( &A ) - mpi_msb( &W );
+ MPI_CHK( mpi_shift_r( &A, j + 1 ) );
+ }
+ A.p[0] |= 3;
+
+ /*
+ * A = A^R mod |X|
+ */
+ MPI_CHK( mpi_exp_mod( &A, &A, &R, X, &RR ) );
+
+ if( mpi_cmp_mpi( &A, &W ) == 0 ||
+ mpi_cmp_int( &A, 1 ) == 0 )
+ continue;
+
+ j = 1;
+ while( j < s && mpi_cmp_mpi( &A, &W ) != 0 )
+ {
+ /*
+ * A = A * A mod |X|
+ */
+ MPI_CHK( mpi_mul_mpi( &T, &A, &A ) );
+ MPI_CHK( mpi_mod_mpi( &A, &T, X ) );
+
+ if( mpi_cmp_int( &A, 1 ) == 0 )
+ break;
+
+ j++;
+ }
+
+ /*
+ * not prime if A != |X| - 1 or A == 1
+ */
+ if( mpi_cmp_mpi( &A, &W ) != 0 ||
+ mpi_cmp_int( &A, 1 ) == 0 )
+ {
+ ret = POLARSSL_ERR_MPI_NOT_ACCEPTABLE;
+ break;
+ }
+ }
+
+cleanup:
+
+ X->s = xs;
+
+ mpi_free( &W ); mpi_free( &R ); mpi_free( &T ); mpi_free( &A );
+ mpi_free( &RR );
+
+ return( ret );
+}
+
+/*
+ * Prime number generation
+ */
+int mpi_gen_prime( mpi *X, size_t nbits, int dh_flag,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng )
+{
+ int ret;
+ size_t k, n;
+ mpi Y;
+
+ if( nbits < 3 || nbits > POLARSSL_MPI_MAX_BITS )
+ return( POLARSSL_ERR_MPI_BAD_INPUT_DATA );
+
+ mpi_init( &Y );
+
+ n = BITS_TO_LIMBS( nbits );
+
+ MPI_CHK( mpi_fill_random( X, n * ciL, f_rng, p_rng ) );
+
+ k = mpi_msb( X );
+ if( k < nbits ) MPI_CHK( mpi_shift_l( X, nbits - k ) );
+ if( k > nbits ) MPI_CHK( mpi_shift_r( X, k - nbits ) );
+
+ X->p[0] |= 3;
+
+ if( dh_flag == 0 )
+ {
+ while( ( ret = mpi_is_prime( X, f_rng, p_rng ) ) != 0 )
+ {
+ if( ret != POLARSSL_ERR_MPI_NOT_ACCEPTABLE )
+ goto cleanup;
+
+ MPI_CHK( mpi_add_int( X, X, 2 ) );
+ }
+ }
+ else
+ {
+ MPI_CHK( mpi_sub_int( &Y, X, 1 ) );
+ MPI_CHK( mpi_shift_r( &Y, 1 ) );
+
+ while( 1 )
+ {
+ if( ( ret = mpi_is_prime( X, f_rng, p_rng ) ) == 0 )
+ {
+ if( ( ret = mpi_is_prime( &Y, f_rng, p_rng ) ) == 0 )
+ break;
+
+ if( ret != POLARSSL_ERR_MPI_NOT_ACCEPTABLE )
+ goto cleanup;
+ }
+
+ if( ret != POLARSSL_ERR_MPI_NOT_ACCEPTABLE )
+ goto cleanup;
+
+ MPI_CHK( mpi_add_int( &Y, X, 1 ) );
+ MPI_CHK( mpi_add_int( X, X, 2 ) );
+ MPI_CHK( mpi_shift_r( &Y, 1 ) );
+ }
+ }
+
+cleanup:
+
+ mpi_free( &Y );
+
+ return( ret );
+}
+
+#endif
+
+#if defined(POLARSSL_SELF_TEST)
+
+#define GCD_PAIR_COUNT 3
+
+static const int gcd_pairs[GCD_PAIR_COUNT][3] =
+{
+ { 693, 609, 21 },
+ { 1764, 868, 28 },
+ { 768454923, 542167814, 1 }
+};
+
+/*
+ * Checkup routine
+ */
+int mpi_self_test( int verbose )
+{
+ int ret, i;
+ mpi A, E, N, X, Y, U, V;
+
+ mpi_init( &A ); mpi_init( &E ); mpi_init( &N ); mpi_init( &X );
+ mpi_init( &Y ); mpi_init( &U ); mpi_init( &V );
+
+ MPI_CHK( mpi_read_string( &A, 16,
+ "EFE021C2645FD1DC586E69184AF4A31E" \
+ "D5F53E93B5F123FA41680867BA110131" \
+ "944FE7952E2517337780CB0DB80E61AA" \
+ "E7C8DDC6C5C6AADEB34EB38A2F40D5E6" ) );
+
+ MPI_CHK( mpi_read_string( &E, 16,
+ "B2E7EFD37075B9F03FF989C7C5051C20" \
+ "34D2A323810251127E7BF8625A4F49A5" \
+ "F3E27F4DA8BD59C47D6DAABA4C8127BD" \
+ "5B5C25763222FEFCCFC38B832366C29E" ) );
+
+ MPI_CHK( mpi_read_string( &N, 16,
+ "0066A198186C18C10B2F5ED9B522752A" \
+ "9830B69916E535C8F047518A889A43A5" \
+ "94B6BED27A168D31D4A52F88925AA8F5" ) );
+
+ MPI_CHK( mpi_mul_mpi( &X, &A, &N ) );
+
+ MPI_CHK( mpi_read_string( &U, 16,
+ "602AB7ECA597A3D6B56FF9829A5E8B85" \
+ "9E857EA95A03512E2BAE7391688D264A" \
+ "A5663B0341DB9CCFD2C4C5F421FEC814" \
+ "8001B72E848A38CAE1C65F78E56ABDEF" \
+ "E12D3C039B8A02D6BE593F0BBBDA56F1" \
+ "ECF677152EF804370C1A305CAF3B5BF1" \
+ "30879B56C61DE584A0F53A2447A51E" ) );
+
+ if( verbose != 0 )
+ printf( " MPI test #1 (mul_mpi): " );
+
+ if( mpi_cmp_mpi( &X, &U ) != 0 )
+ {
+ if( verbose != 0 )
+ printf( "failed\n" );
+
+ return( 1 );
+ }
+
+ if( verbose != 0 )
+ printf( "passed\n" );
+
+ MPI_CHK( mpi_div_mpi( &X, &Y, &A, &N ) );
+
+ MPI_CHK( mpi_read_string( &U, 16,
+ "256567336059E52CAE22925474705F39A94" ) );
+
+ MPI_CHK( mpi_read_string( &V, 16,
+ "6613F26162223DF488E9CD48CC132C7A" \
+ "0AC93C701B001B092E4E5B9F73BCD27B" \
+ "9EE50D0657C77F374E903CDFA4C642" ) );
+
+ if( verbose != 0 )
+ printf( " MPI test #2 (div_mpi): " );
+
+ if( mpi_cmp_mpi( &X, &U ) != 0 ||
+ mpi_cmp_mpi( &Y, &V ) != 0 )
+ {
+ if( verbose != 0 )
+ printf( "failed\n" );
+
+ return( 1 );
+ }
+
+ if( verbose != 0 )
+ printf( "passed\n" );
+
+ MPI_CHK( mpi_exp_mod( &X, &A, &E, &N, NULL ) );
+
+ MPI_CHK( mpi_read_string( &U, 16,
+ "36E139AEA55215609D2816998ED020BB" \
+ "BD96C37890F65171D948E9BC7CBAA4D9" \
+ "325D24D6A3C12710F10A09FA08AB87" ) );
+
+ if( verbose != 0 )
+ printf( " MPI test #3 (exp_mod): " );
+
+ if( mpi_cmp_mpi( &X, &U ) != 0 )
+ {
+ if( verbose != 0 )
+ printf( "failed\n" );
+
+ return( 1 );
+ }
+
+ if( verbose != 0 )
+ printf( "passed\n" );
+
+#if defined(POLARSSL_GENPRIME)
+ MPI_CHK( mpi_inv_mod( &X, &A, &N ) );
+
+ MPI_CHK( mpi_read_string( &U, 16,
+ "003A0AAEDD7E784FC07D8F9EC6E3BFD5" \
+ "C3DBA76456363A10869622EAC2DD84EC" \
+ "C5B8A74DAC4D09E03B5E0BE779F2DF61" ) );
+
+ if( verbose != 0 )
+ printf( " MPI test #4 (inv_mod): " );
+
+ if( mpi_cmp_mpi( &X, &U ) != 0 )
+ {
+ if( verbose != 0 )
+ printf( "failed\n" );
+
+ return( 1 );
+ }
+
+ if( verbose != 0 )
+ printf( "passed\n" );
+#endif
+
+ if( verbose != 0 )
+ printf( " MPI test #5 (simple gcd): " );
+
+ for ( i = 0; i < GCD_PAIR_COUNT; i++)
+ {
+ MPI_CHK( mpi_lset( &X, gcd_pairs[i][0] ) );
+ MPI_CHK( mpi_lset( &Y, gcd_pairs[i][1] ) );
+
+ MPI_CHK( mpi_gcd( &A, &X, &Y ) );
+
+ if( mpi_cmp_int( &A, gcd_pairs[i][2] ) != 0 )
+ {
+ if( verbose != 0 )
+ printf( "failed at %d\n", i );
+
+ return( 1 );
+ }
+ }
+
+ if( verbose != 0 )
+ printf( "passed\n" );
+
+cleanup:
+
+ if( ret != 0 && verbose != 0 )
+ printf( "Unexpected error, return code = %08X\n", ret );
+
+ mpi_free( &A ); mpi_free( &E ); mpi_free( &N ); mpi_free( &X );
+ mpi_free( &Y ); mpi_free( &U ); mpi_free( &V );
+
+ if( verbose != 0 )
+ printf( "\n" );
+
+ return( ret );
+}
+
+#endif
+
+#endif
diff --git a/drivers/misc/rsa/bignum.h b/drivers/misc/rsa/bignum.h
new file mode 100755
index 00000000..3b4d19c4
--- /dev/null
+++ b/drivers/misc/rsa/bignum.h
@@ -0,0 +1,763 @@
+/**
+ * \file bignum.h
+ *
+ * \brief Multi-precision integer library
+ *
+ * Copyright (C) 2006-2010, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_BIGNUM_H
+#define POLARSSL_BIGNUM_H
+
+//#include <common.h>
+//#include <command.h>
+#include <asm/io.h>
+
+//#include <stdio.h>
+//#include <string.h>
+
+//#include "config.h"
+
+#define POLARSSL_ERR_MPI_FILE_IO_ERROR -0x0002 /**< An error occurred while reading from or writing to a file. */
+#define POLARSSL_ERR_MPI_BAD_INPUT_DATA -0x0004 /**< Bad input parameters to function. */
+#define POLARSSL_ERR_MPI_INVALID_CHARACTER -0x0006 /**< There is an invalid character in the digit string. */
+#define POLARSSL_ERR_MPI_BUFFER_TOO_SMALL -0x0008 /**< The buffer is too small to write to. */
+#define POLARSSL_ERR_MPI_NEGATIVE_VALUE -0x000A /**< The input arguments are negative or result in illegal output. */
+#define POLARSSL_ERR_MPI_DIVISION_BY_ZERO -0x000C /**< The input argument for division is zero, which is not allowed. */
+#define POLARSSL_ERR_MPI_NOT_ACCEPTABLE -0x000E /**< The input arguments are not acceptable. */
+#define POLARSSL_ERR_MPI_MALLOC_FAILED -0x0010 /**< Memory allocation failed. */
+
+#define MPI_CHK(f) if( ( ret = f ) != 0 ) goto cleanup
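+
+/*
+ * Illustrative sketch of the error-handling pattern MPI_CHK enables: each
+ * fallible call jumps to a shared cleanup label on failure, so temporaries
+ * are freed exactly once on every path.
+ *
+ *     int ret;
+ *     mpi T;
+ *     mpi_init( &T );
+ *     MPI_CHK( mpi_lset( &T, 42 ) );
+ *     MPI_CHK( mpi_add_mpi( X, X, &T ) );
+ * cleanup:
+ *     mpi_free( &T );
+ *     return( ret );
+ */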
+
+/*
+ * Maximum size MPIs are allowed to grow to in number of limbs.
+ */
+#define POLARSSL_MPI_MAX_LIMBS 10000
+
+/*
+ * Maximum window size used for modular exponentiation. Default: 6
+ * Minimum value: 1. Maximum value: 6.
+ *
+ * Result is an array of ( 2 << POLARSSL_MPI_WINDOW_SIZE ) MPIs used
+ * for the sliding window calculation (128 by default).
+ *
+ * Reducing the window size reduces speed.
+ */
+#define POLARSSL_MPI_WINDOW_SIZE 6 /**< Maximum window size used. */
+
+/*
+ * Maximum size of MPIs allowed in bits and bytes for user-MPIs.
+ * ( Default: 512 bytes => 4096 bits )
+ *
+ * Note: Calculations can temporarily result in larger MPIs, so the number
+ * of limbs required (POLARSSL_MPI_MAX_LIMBS) is higher.
+ */
+#define POLARSSL_MPI_MAX_SIZE 512 /**< Maximum number of bytes for usable MPIs. */
+#define POLARSSL_MPI_MAX_BITS ( 8 * POLARSSL_MPI_MAX_SIZE ) /**< Maximum number of bits for usable MPIs. */
+
+/*
+ * When reading from files with mpi_read_file() the buffer should have space
+ * for a (short) label, the MPI (in the provided radix), the newline
+ * characters and the '\0'.
+ *
+ * By default we assume at least a 10 char label, a minimum radix of 10
+ * (decimal) and a maximum of 4096 bit numbers (1234 decimal chars).
+ */
+#define POLARSSL_MPI_READ_BUFFER_SIZE 1250
+
+/*
+ * Define the base integer type, architecture-wise
+ */
+#if defined(POLARSSL_HAVE_INT8)
+typedef signed char t_sint;
+typedef unsigned char t_uint;
+typedef unsigned short t_udbl;
+#else
+#if defined(POLARSSL_HAVE_INT16)
+typedef signed short t_sint;
+typedef unsigned short t_uint;
+typedef unsigned long t_udbl;
+#else
+ typedef signed long t_sint;
+ typedef unsigned long t_uint;
+ #if defined(_MSC_VER) && defined(_M_IX86)
+ typedef unsigned __int64 t_udbl;
+ #else
+ #if defined(__GNUC__) && ( \
+ defined(__amd64__) || defined(__x86_64__) || \
+ defined(__ppc64__) || defined(__powerpc64__) || \
+ defined(__ia64__) || defined(__alpha__) || \
+ (defined(__sparc__) && defined(__arch64__)) || \
+ defined(__s390x__) )
+ typedef unsigned int t_udbl __attribute__((mode(TI)));
+ #define POLARSSL_HAVE_LONGLONG
+ #else
+ #if defined(POLARSSL_HAVE_LONGLONG)
+ typedef unsigned long long t_udbl;
+ #endif
+ #endif
+ #endif
+#endif
+#endif
+
+/**
+ * \brief MPI structure
+ */
+typedef struct
+{
+ int s; /*!< integer sign */
+ size_t n; /*!< total # of limbs */
+ t_uint *p; /*!< pointer to limbs */
+}
+mpi;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief Initialize one MPI
+ *
+ * \param X One MPI to initialize.
+ */
+void mpi_init( mpi *X );
+
+/**
+ * \brief Free one MPI
+ *
+ * \param X One MPI to free.
+ */
+void mpi_free( mpi *X );
+
+/**
+ * \brief Enlarge to the specified number of limbs
+ *
+ * \param X MPI to grow
+ * \param nblimbs The target number of limbs
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_grow( mpi *X, size_t nblimbs );
+
+/**
+ * \brief Copy the contents of Y into X
+ *
+ * \param X Destination MPI
+ * \param Y Source MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_copy( mpi *X, const mpi *Y );
+
+/**
+ * \brief Swap the contents of X and Y
+ *
+ * \param X First MPI value
+ * \param Y Second MPI value
+ */
+void mpi_swap( mpi *X, mpi *Y );
+
+/**
+ * \brief Set value from integer
+ *
+ * \param X MPI to set
+ * \param z Value to use
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_lset( mpi *X, t_sint z );
+
+/**
+ * \brief Get a specific bit from X
+ *
+ * \param X MPI to use
+ * \param pos Zero-based index of the bit in X
+ *
+ * \return Either a 0 or a 1
+ */
+int mpi_get_bit( mpi *X, size_t pos );
+
+/**
+ * \brief Set a bit of X to a specific value of 0 or 1
+ *
+ * \note Will grow X if necessary to set a bit to 1 in a limb that
+ * does not yet exist. Will not grow if the bit is being set to 0.
+ *
+ * \param X MPI to use
+ * \param pos Zero-based index of the bit in X
+ * \param val The value to set the bit to (0 or 1)
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_BAD_INPUT_DATA if val is not 0 or 1
+ */
+int mpi_set_bit( mpi *X, size_t pos, unsigned char val );
+
+/**
+ * \brief Return the number of least significant bits
+ *
+ * \param X MPI to use
+ */
+size_t mpi_lsb( const mpi *X );
+
+/**
+ * \brief Return the number of most significant bits
+ *
+ * \param X MPI to use
+ */
+size_t mpi_msb( const mpi *X );
+
+/**
+ * \brief Return the total size in bytes
+ *
+ * \param X MPI to use
+ */
+size_t mpi_size( const mpi *X );
+
+/**
+ * \brief Import from an ASCII string
+ *
+ * \param X Destination MPI
+ * \param radix Input numeric base
+ * \param s Null-terminated string buffer
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_MPI_XXX error code
+ */
+int mpi_read_string( mpi *X, int radix, const char *s );
+
+/**
+ * \brief Export into an ASCII string
+ *
+ * \param X Source MPI
+ * \param radix Output numeric base
+ * \param s String buffer
+ * \param slen String buffer size
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_MPI_XXX error code.
+ * *slen is always updated to reflect the amount
+ * of data that has (or would have) been written.
+ *
+ * \note Call this function with *slen = 0 to obtain the
+ * minimum required buffer size in *slen.
+ */
+int mpi_write_string( const mpi *X, int radix, char *s, size_t *slen );
+
+/**
+ * \brief Read X from an opened file
+ *
+ * \param X Destination MPI
+ * \param radix Input numeric base
+ * \param fin Input file handle
+ *
+ * \return 0 if successful, POLARSSL_ERR_MPI_BUFFER_TOO_SMALL if
+ * the file read buffer is too small or a
+ * POLARSSL_ERR_MPI_XXX error code
+ */
+//int mpi_read_file( mpi *X, int radix, FILE *fin );
+
+/**
+ * \brief Write X into an opened file, or stdout if fout is NULL
+ *
+ * \param p Prefix, can be NULL
+ * \param X Source MPI
+ * \param radix Output numeric base
+ * \param fout Output file handle (can be NULL)
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_MPI_XXX error code
+ *
+ * \note Set fout == NULL to print X on the console.
+ */
+//int mpi_write_file( const char *p, const mpi *X, int radix, FILE *fout );
+
+/**
+ * \brief Import X from unsigned binary data, big endian
+ *
+ * \param X Destination MPI
+ * \param buf Input buffer
+ * \param buflen Input buffer size
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_read_binary( mpi *X, const unsigned char *buf, size_t buflen );
+
+/**
+ * \brief Export X into unsigned binary data, big endian
+ *
+ * \param X Source MPI
+ * \param buf Output buffer
+ * \param buflen Output buffer size
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_BUFFER_TOO_SMALL if buf isn't large enough
+ */
+int mpi_write_binary( const mpi *X, unsigned char *buf, size_t buflen );
+
+/**
+ * \brief Left-shift: X <<= count
+ *
+ * \param X MPI to shift
+ * \param count Amount to shift
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_shift_l( mpi *X, size_t count );
+
+/**
+ * \brief Right-shift: X >>= count
+ *
+ * \param X MPI to shift
+ * \param count Amount to shift
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_shift_r( mpi *X, size_t count );
+
+/**
+ * \brief Compare unsigned values
+ *
+ * \param X Left-hand MPI
+ * \param Y Right-hand MPI
+ *
+ * \return 1 if |X| is greater than |Y|,
+ * -1 if |X| is lesser than |Y| or
+ * 0 if |X| is equal to |Y|
+ */
+int mpi_cmp_abs( const mpi *X, const mpi *Y );
+
+/**
+ * \brief Compare signed values
+ *
+ * \param X Left-hand MPI
+ * \param Y Right-hand MPI
+ *
+ * \return 1 if X is greater than Y,
+ * -1 if X is lesser than Y or
+ * 0 if X is equal to Y
+ */
+int mpi_cmp_mpi( const mpi *X, const mpi *Y );
+
+/**
+ * \brief Compare signed values
+ *
+ * \param X Left-hand MPI
+ * \param z The integer value to compare to
+ *
+ * \return 1 if X is greater than z,
+ * -1 if X is lesser than z or
+ * 0 if X is equal to z
+ */
+int mpi_cmp_int( const mpi *X, t_sint z );
+
+/**
+ * \brief Unsigned addition: X = |A| + |B|
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_add_abs( mpi *X, const mpi *A, const mpi *B );
+
+/**
+ * \brief Unsigned subtraction: X = |A| - |B|
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_NEGATIVE_VALUE if B is greater than A
+ */
+int mpi_sub_abs( mpi *X, const mpi *A, const mpi *B );
+
+/**
+ * \brief Signed addition: X = A + B
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_add_mpi( mpi *X, const mpi *A, const mpi *B );
+
+/**
+ * \brief Signed subtraction: X = A - B
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_sub_mpi( mpi *X, const mpi *A, const mpi *B );
+
+/**
+ * \brief Signed addition: X = A + b
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param b The integer value to add
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_add_int( mpi *X, const mpi *A, t_sint b );
+
+/**
+ * \brief Signed subtraction: X = A - b
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param b The integer value to subtract
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_sub_int( mpi *X, const mpi *A, t_sint b );
+
+/**
+ * \brief Baseline multiplication: X = A * B
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_mul_mpi( mpi *X, const mpi *A, const mpi *B );
+
+/**
+ * \brief Baseline multiplication: X = A * b
+ * Note: b is treated as an unsigned integer, so negative
+ * values of b are ignored.
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param b The integer value to multiply with
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_mul_int( mpi *X, const mpi *A, t_sint b );
+
+/**
+ * \brief Division by mpi: A = Q * B + R
+ *
+ * \param Q Destination MPI for the quotient
+ * \param R Destination MPI for the remainder
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_DIVISION_BY_ZERO if B == 0
+ *
+ * \note Either Q or R can be NULL.
+ */
+int mpi_div_mpi( mpi *Q, mpi *R, const mpi *A, const mpi *B );
+
+/**
+ * \brief Division by int: A = Q * b + R
+ *
+ * \param Q Destination MPI for the quotient
+ * \param R Destination MPI for the remainder
+ * \param A Left-hand MPI
+ * \param b Integer to divide by
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_DIVISION_BY_ZERO if b == 0
+ *
+ * \note Either Q or R can be NULL.
+ */
+int mpi_div_int( mpi *Q, mpi *R, const mpi *A, t_sint b );
+
+/**
+ * \brief Modulo: R = A mod B
+ *
+ * \param R Destination MPI for the remainder
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_DIVISION_BY_ZERO if B == 0,
+ * POLARSSL_ERR_MPI_NEGATIVE_VALUE if B < 0
+ */
+int mpi_mod_mpi( mpi *R, const mpi *A, const mpi *B );
+
+/**
+ * \brief Modulo: r = A mod b
+ *
+ * \param r Destination t_uint
+ * \param A Left-hand MPI
+ * \param b Integer to divide by
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_DIVISION_BY_ZERO if b == 0,
+ * POLARSSL_ERR_MPI_NEGATIVE_VALUE if b < 0
+ */
+int mpi_mod_int( t_uint *r, const mpi *A, t_sint b );
+
+/**
+ * \brief Sliding-window exponentiation: X = A^E mod N
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param E Exponent MPI
+ * \param N Modulus MPI
+ * \param _RR Speed-up MPI used for recalculations
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_BAD_INPUT_DATA if N is negative or even
+ *
+ * \note _RR is used to avoid re-computing R*R mod N across
+ * multiple calls, which speeds things up. It can be
+ * set to NULL if the extra performance is not needed.
+ */
+int mpi_exp_mod( mpi *X, const mpi *A, const mpi *E, const mpi *N, mpi *_RR );
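+/*
+ * Editor's sketch (illustrative only; variable names are hypothetical):
+ * reusing _RR across calls that share the same modulus N avoids
+ * recomputing R*R mod N each time.
+ *
+ *     mpi RR;
+ *     mpi_init( &RR );                        // empty cache
+ *     mpi_exp_mod( &X1, &A1, &E, &N, &RR );   // computes and caches R*R mod N
+ *     mpi_exp_mod( &X2, &A2, &E, &N, &RR );   // reuses the cached value
+ *     mpi_free( &RR );
+ */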
+
+/**
+ * \brief Fill an MPI X with size bytes of random data
+ *
+ * \param X Destination MPI
+ * \param size Size in bytes
+ * \param f_rng RNG function
+ * \param p_rng RNG parameter
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_fill_random( mpi *X, size_t size,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng );
+
+/**
+ * \brief Greatest common divisor: G = gcd(A, B)
+ *
+ * \param G Destination MPI
+ * \param A Left-hand MPI
+ * \param B Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed
+ */
+int mpi_gcd( mpi *G, const mpi *A, const mpi *B );
+
+/**
+ * \brief Modular inverse: X = A^-1 mod N
+ *
+ * \param X Destination MPI
+ * \param A Left-hand MPI
+ * \param N Right-hand MPI
+ *
+ * \return 0 if successful,
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_BAD_INPUT_DATA if N is negative or zero,
+ * POLARSSL_ERR_MPI_NOT_ACCEPTABLE if A has no inverse mod N
+ */
+int mpi_inv_mod( mpi *X, const mpi *A, const mpi *N );
+
+/**
+ * \brief Miller-Rabin primality test
+ *
+ * \param X MPI to check
+ * \param f_rng RNG function
+ * \param p_rng RNG parameter
+ *
+ * \return 0 if successful (probably prime),
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_NOT_ACCEPTABLE if X is not prime
+ */
+int mpi_is_prime( mpi *X,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng );
+
+/**
+ * \brief Prime number generation
+ *
+ * \param X Destination MPI
+ * \param nbits Required size of X in bits ( 3 <= nbits <= POLARSSL_MPI_MAX_BITS )
+ * \param dh_flag If 1, then (X-1)/2 will be prime too
+ * \param f_rng RNG function
+ * \param p_rng RNG parameter
+ *
+ * \return 0 if successful (probably prime),
+ * POLARSSL_ERR_MPI_MALLOC_FAILED if memory allocation failed,
+ * POLARSSL_ERR_MPI_BAD_INPUT_DATA if nbits is < 3
+ */
+int mpi_gen_prime( mpi *X, size_t nbits, int dh_flag,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng );
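+/*
+ * Editor's sketch (illustrative): generate a 1024-bit prime P such that
+ * (P-1)/2 is also prime, as used for DHM parameters. f_rng and p_rng
+ * stand for the caller's RNG callback and its context.
+ *
+ *     mpi P;
+ *     mpi_init( &P );
+ *     mpi_gen_prime( &P, 1024, 1, f_rng, p_rng );   // dh_flag = 1
+ *     mpi_free( &P );
+ */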
+
+/**
+ * \brief Checkup routine
+ *
+ * \return 0 if successful, or 1 if the test failed
+ */
+int mpi_self_test( int verbose );
+
+#ifdef __cplusplus
+}
+#endif
+
+#define __ARM__ 1
+#ifdef __ARM__
+#define MULADDC_INIT \
+ asm( "ldr r0, %0 " :: "m" (s)); \
+ asm( "ldr r1, %0 " :: "m" (d)); \
+ asm( "ldr r2, %0 " :: "m" (c)); \
+ asm( "ldr r3, %0 " :: "m" (b));
+
+#define MULADDC_CORE \
+ asm( "ldr r4, [r0], #4 " ); \
+ asm( "mov r5, #0 " ); \
+ asm( "ldr r6, [r1] " ); \
+ asm( "umlal r2, r5, r3, r4 " ); \
+ asm( "adds r7, r6, r2 " ); \
+ asm( "adc r2, r5, #0 " ); \
+ asm( "str r7, [r1], #4 " );
+
+#define MULADDC_STOP \
+ asm( "str r2, %0 " : "=m" (c)); \
+ asm( "str r1, %0 " : "=m" (d)); \
+ asm( "str r0, %0 " : "=m" (s) :: \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7" );
+
+#endif
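+/*
+ * Editor's note: the MULADDC macros implement one step of the
+ * multiply-accumulate inner loop of the baseline multiplication
+ * (d[i] = d[i] + s[i] * b + carry). A rough portable C equivalent of
+ * MULADDC_CORE, assuming a double-width t_udbl type is available,
+ * would be:
+ *
+ *     t_udbl r = *(s++) * (t_udbl) b;   // single-limb product
+ *     r += c;                           // add incoming carry
+ *     r += *d;                          // accumulate into destination limb
+ *     c = (t_uint)( r >> biL );         // carry out (biL = bits per limb)
+ *     *(d++) = (t_uint) r;              // store low limb
+ */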
+
+#ifdef __i386__
+#define MULADDC_INIT \
+ asm( " \
+ movl %%ebx, %0; \
+ movl %5, %%esi; \
+ movl %6, %%edi; \
+ movl %7, %%ecx; \
+ movl %8, %%ebx; \
+ "
+
+#define MULADDC_CORE \
+ " \
+ lodsl; \
+ mull %%ebx; \
+ addl %%ecx, %%eax; \
+ adcl $0, %%edx; \
+ addl (%%edi), %%eax; \
+ adcl $0, %%edx; \
+ movl %%edx, %%ecx; \
+ stosl; \
+ "
+
+
+#define MULADDC_HUIT \
+ " \
+ movd %%ecx, %%mm1; \
+ movd %%ebx, %%mm0; \
+ movd (%%edi), %%mm3; \
+ paddq %%mm3, %%mm1; \
+ movd (%%esi), %%mm2; \
+ pmuludq %%mm0, %%mm2; \
+ movd 4(%%esi), %%mm4; \
+ pmuludq %%mm0, %%mm4; \
+ movd 8(%%esi), %%mm6; \
+ pmuludq %%mm0, %%mm6; \
+ movd 12(%%esi), %%mm7; \
+ pmuludq %%mm0, %%mm7; \
+ paddq %%mm2, %%mm1; \
+ movd 4(%%edi), %%mm3; \
+ paddq %%mm4, %%mm3; \
+ movd 8(%%edi), %%mm5; \
+ paddq %%mm6, %%mm5; \
+ movd 12(%%edi), %%mm4; \
+ paddq %%mm4, %%mm7; \
+ movd %%mm1, (%%edi); \
+ movd 16(%%esi), %%mm2; \
+ pmuludq %%mm0, %%mm2; \
+ psrlq $32, %%mm1; \
+ movd 20(%%esi), %%mm4; \
+ pmuludq %%mm0, %%mm4; \
+ paddq %%mm3, %%mm1; \
+ movd 24(%%esi), %%mm6; \
+ pmuludq %%mm0, %%mm6; \
+ movd %%mm1, 4(%%edi); \
+ psrlq $32, %%mm1; \
+ movd 28(%%esi), %%mm3; \
+ pmuludq %%mm0, %%mm3; \
+ paddq %%mm5, %%mm1; \
+ movd 16(%%edi), %%mm5; \
+ paddq %%mm5, %%mm2; \
+ movd %%mm1, 8(%%edi); \
+ psrlq $32, %%mm1; \
+ paddq %%mm7, %%mm1; \
+ movd 20(%%edi), %%mm5; \
+ paddq %%mm5, %%mm4; \
+ movd %%mm1, 12(%%edi); \
+ psrlq $32, %%mm1; \
+ paddq %%mm2, %%mm1; \
+ movd 24(%%edi), %%mm5; \
+ paddq %%mm5, %%mm6; \
+ movd %%mm1, 16(%%edi); \
+ psrlq $32, %%mm1; \
+ paddq %%mm4, %%mm1; \
+ movd 28(%%edi), %%mm5; \
+ paddq %%mm5, %%mm3; \
+ movd %%mm1, 20(%%edi); \
+ psrlq $32, %%mm1; \
+ paddq %%mm6, %%mm1; \
+ movd %%mm1, 24(%%edi); \
+ psrlq $32, %%mm1; \
+ paddq %%mm3, %%mm1; \
+ movd %%mm1, 28(%%edi); \
+ addl $32, %%edi; \
+ addl $32, %%esi; \
+ psrlq $32, %%mm1; \
+ movd %%mm1, %%ecx; \
+ "
+
+#define MULADDC_STOP \
+ " \
+ emms; \
+ movl %4, %%ebx; \
+ movl %%ecx, %1; \
+ movl %%edi, %2; \
+ movl %%esi, %3; \
+ " \
+ : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
+ : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
+ : "eax", "ecx", "edx", "esi", "edi" \
+ );
+
+#endif
+#endif /* bignum.h */
diff --git a/drivers/misc/rsa/dhm.h b/drivers/misc/rsa/dhm.h
new file mode 100755
index 00000000..0c8dd55e
--- /dev/null
+++ b/drivers/misc/rsa/dhm.h
@@ -0,0 +1,153 @@
+/**
+ * \file dhm.h
+ *
+ * \brief Diffie-Hellman-Merkle key exchange
+ *
+ * Copyright (C) 2006-2010, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_DHM_H
+#define POLARSSL_DHM_H
+
+#include "bignum.h"
+
+/*
+ * DHM Error codes
+ */
+#define POLARSSL_ERR_DHM_BAD_INPUT_DATA -0x3080 /**< Bad input parameters to function. */
+#define POLARSSL_ERR_DHM_READ_PARAMS_FAILED -0x3100 /**< Reading of the DHM parameters failed. */
+#define POLARSSL_ERR_DHM_MAKE_PARAMS_FAILED -0x3180 /**< Making of the DHM parameters failed. */
+#define POLARSSL_ERR_DHM_READ_PUBLIC_FAILED -0x3200 /**< Reading of the public values failed. */
+#define POLARSSL_ERR_DHM_MAKE_PUBLIC_FAILED -0x3280 /**< Making of the public value failed. */
+#define POLARSSL_ERR_DHM_CALC_SECRET_FAILED -0x3300 /**< Calculation of the DHM secret failed. */
+
+/**
+ * \brief DHM context structure
+ */
+typedef struct
+{
+ size_t len; /*!< size(P) in chars */
+ mpi P; /*!< prime modulus */
+ mpi G; /*!< generator */
+ mpi X; /*!< secret value */
+ mpi GX; /*!< self = G^X mod P */
+ mpi GY; /*!< peer = G^Y mod P */
+ mpi K; /*!< key = GY^X mod P */
+ mpi RP; /*!< cached R^2 mod P */
+}
+dhm_context;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief Parse the ServerKeyExchange parameters
+ *
+ * \param ctx DHM context
+ * \param p &(start of input buffer)
+ * \param end end of buffer
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_DHM_XXX error code
+ */
+int dhm_read_params( dhm_context *ctx,
+ unsigned char **p,
+ const unsigned char *end );
+
+/**
+ * \brief Setup and write the ServerKeyExchange parameters
+ *
+ * \param ctx DHM context
+ * \param x_size private value size in bytes
+ * \param output destination buffer
+ * \param olen number of chars written
+ * \param f_rng RNG function
+ * \param p_rng RNG parameter
+ *
+ * \note This function assumes that ctx->P and ctx->G
+ * have already been properly set (for example
+ * using mpi_read_string or mpi_read_binary).
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_DHM_XXX error code
+ */
+int dhm_make_params( dhm_context *ctx, int x_size,
+ unsigned char *output, size_t *olen,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng );
+
+/**
+ * \brief Import the peer's public value G^Y
+ *
+ * \param ctx DHM context
+ * \param input input buffer
+ * \param ilen size of buffer
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_DHM_XXX error code
+ */
+int dhm_read_public( dhm_context *ctx,
+ const unsigned char *input, size_t ilen );
+
+/**
+ * \brief Create own private value X and export G^X
+ *
+ * \param ctx DHM context
+ * \param x_size private value size in bytes
+ * \param output destination buffer
+ * \param olen must be equal to ctx->P.len
+ * \param f_rng RNG function
+ * \param p_rng RNG parameter
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_DHM_XXX error code
+ */
+int dhm_make_public( dhm_context *ctx, int x_size,
+ unsigned char *output, size_t olen,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng );
+
+/**
+ * \brief Derive and export the shared secret (G^Y)^X mod P
+ *
+ * \param ctx DHM context
+ * \param output destination buffer
+ * \param olen number of chars written
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_DHM_XXX error code
+ */
+int dhm_calc_secret( dhm_context *ctx,
+ unsigned char *output, size_t *olen );
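+/*
+ * Editor's sketch of the client-side flow (names illustrative, error
+ * checking omitted):
+ *
+ *     dhm_context dhm;
+ *     memset( &dhm, 0, sizeof( dhm ) );
+ *     dhm_read_params( &dhm, &p, end );               // import P, G and G^Y
+ *     dhm_make_public( &dhm, 256, out, dhm.len,       // pick X, export G^X
+ *                      f_rng, p_rng );
+ *     dhm_calc_secret( &dhm, secret, &n );            // K = (G^Y)^X mod P
+ *     dhm_free( &dhm );
+ */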
+
+/**
+ * \brief Free the components of a DHM key
+ *
+ * \param ctx DHM context to free
+ */
+void dhm_free( dhm_context *ctx );
+
+/**
+ * \brief Checkup routine
+ *
+ * \return 0 if successful, or 1 if the test failed
+ */
+int dhm_self_test( int verbose );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/misc/rsa/pem.h b/drivers/misc/rsa/pem.h
new file mode 100755
index 00000000..fbae8c55
--- /dev/null
+++ b/drivers/misc/rsa/pem.h
@@ -0,0 +1,100 @@
+/**
+ * \file pem.h
+ *
+ * \brief Privacy Enhanced Mail (PEM) decoding
+ *
+ * Copyright (C) 2006-2010, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_PEM_H
+#define POLARSSL_PEM_H
+
+//#include <string.h>
+
+/**
+ * \name PEM Error codes
+ * These error codes are returned in case of errors reading the
+ * PEM data.
+ * \{
+ */
+#define POLARSSL_ERR_PEM_NO_HEADER_PRESENT -0x1080 /**< No PEM header found. */
+#define POLARSSL_ERR_PEM_INVALID_DATA -0x1100 /**< PEM string is not as expected. */
+#define POLARSSL_ERR_PEM_MALLOC_FAILED -0x1180 /**< Failed to allocate memory. */
+#define POLARSSL_ERR_PEM_INVALID_ENC_IV -0x1200 /**< RSA IV is not in hex-format. */
+#define POLARSSL_ERR_PEM_UNKNOWN_ENC_ALG -0x1280 /**< Unsupported key encryption algorithm. */
+#define POLARSSL_ERR_PEM_PASSWORD_REQUIRED -0x1300 /**< Private key password can't be empty. */
+#define POLARSSL_ERR_PEM_PASSWORD_MISMATCH -0x1380 /**< Given private key password does not allow for correct decryption. */
+#define POLARSSL_ERR_PEM_FEATURE_UNAVAILABLE -0x1400 /**< Unavailable feature, e.g. hashing/encryption combination. */
+/* \} name */
+
+/**
+ * \brief PEM context structure
+ */
+typedef struct
+{
+ unsigned char *buf; /*!< buffer for decoded data */
+ size_t buflen; /*!< length of the buffer */
+ unsigned char *info; /*!< buffer for extra header information */
+}
+pem_context;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief PEM context setup
+ *
+ * \param ctx context to be initialized
+ */
+void pem_init( pem_context *ctx );
+
+/**
+ * \brief Read a buffer for PEM information and store the resulting
+ * data into the specified context buffers.
+ *
+ * \param ctx context to use
+ * \param data source data to look in
+ * \param pwd password for decryption (can be NULL)
+ * \param pwdlen length of password
+ * \param use_len destination for total length used
+ *
+ * \return 0 on success, or a specific PEM error code
+ */
+int pem_read_buffer( pem_context *ctx,
+ const unsigned char *data,
+ const unsigned char *pwd,
+ size_t pwdlen, size_t *use_len );
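+/*
+ * Editor's sketch (illustrative, error checking omitted):
+ *
+ *     pem_context pem;
+ *     pem_init( &pem );
+ *     if( pem_read_buffer( &pem, data, NULL, 0, &used ) == 0 )
+ *     {
+ *         // decoded data is now in pem.buf (pem.buflen bytes)
+ *     }
+ *     pem_free( &pem );
+ */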
+
+/**
+ * \brief PEM context memory freeing
+ *
+ * \param ctx context to be freed
+ */
+void pem_free( pem_context *ctx );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* pem.h */
diff --git a/drivers/misc/rsa/rsa.h b/drivers/misc/rsa/rsa.h
new file mode 100755
index 00000000..41691ae1
--- /dev/null
+++ b/drivers/misc/rsa/rsa.h
@@ -0,0 +1,373 @@
+/**
+ * \file rsa.h
+ *
+ * \brief The RSA public-key cryptosystem
+ *
+ * Copyright (C) 2006-2010, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_RSA_H
+#define POLARSSL_RSA_H
+
+#include "bignum.h"
+#include <stddef.h>
+/*
+ * RSA Error codes
+ */
+#define POLARSSL_ERR_RSA_BAD_INPUT_DATA -0x4080 /**< Bad input parameters to function. */
+#define POLARSSL_ERR_RSA_INVALID_PADDING -0x4100 /**< Input data contains invalid padding and is rejected. */
+#define POLARSSL_ERR_RSA_KEY_GEN_FAILED -0x4180 /**< Something failed during generation of a key. */
+#define POLARSSL_ERR_RSA_KEY_CHECK_FAILED -0x4200 /**< Key failed to pass the library's validity check. */
+#define POLARSSL_ERR_RSA_PUBLIC_FAILED -0x4280 /**< The public key operation failed. */
+#define POLARSSL_ERR_RSA_PRIVATE_FAILED -0x4300 /**< The private key operation failed. */
+#define POLARSSL_ERR_RSA_VERIFY_FAILED -0x4380 /**< The PKCS#1 verification failed. */
+#define POLARSSL_ERR_RSA_OUTPUT_TOO_LARGE -0x4400 /**< The output buffer for decryption is not large enough. */
+#define POLARSSL_ERR_RSA_RNG_FAILED -0x4480 /**< The random generator failed to generate non-zeros. */
+
+/*
+ * PKCS#1 constants
+ */
+#define SIG_RSA_RAW 0
+#define SIG_RSA_MD2 2
+#define SIG_RSA_MD4 3
+#define SIG_RSA_MD5 4
+#define SIG_RSA_SHA1 5
+#define SIG_RSA_SHA224 14
+#define SIG_RSA_SHA256 11
+#define SIG_RSA_SHA384 12
+#define SIG_RSA_SHA512 13
+
+#define RSA_PUBLIC 0
+#define RSA_PRIVATE 1
+
+#define RSA_PKCS_V15 0
+#define RSA_PKCS_V21 1
+
+#define RSA_SIGN 1
+#define RSA_CRYPT 2
+
+#define ASN1_STR_CONSTRUCTED_SEQUENCE "\x30"
+#define ASN1_STR_NULL "\x05"
+#define ASN1_STR_OID "\x06"
+#define ASN1_STR_OCTET_STRING "\x04"
+
+#define OID_DIGEST_ALG_MDX "\x2A\x86\x48\x86\xF7\x0D\x02\x00"
+#define OID_HASH_ALG_SHA1 "\x2b\x0e\x03\x02\x1a"
+#define OID_HASH_ALG_SHA2X "\x60\x86\x48\x01\x65\x03\x04\x02\x00"
+
+#define OID_ISO_MEMBER_BODIES "\x2a"
+#define OID_ISO_IDENTIFIED_ORG "\x2b"
+
+/*
+ * ISO Member bodies OID parts
+ */
+#define OID_COUNTRY_US "\x86\x48"
+#define OID_RSA_DATA_SECURITY "\x86\xf7\x0d"
+
+/*
+ * ISO Identified organization OID parts
+ */
+#define OID_OIW_SECSIG_SHA1 "\x0e\x03\x02\x1a"
+
+/*
+ * DigestInfo ::= SEQUENCE {
+ * digestAlgorithm DigestAlgorithmIdentifier,
+ * digest Digest }
+ *
+ * DigestAlgorithmIdentifier ::= AlgorithmIdentifier
+ *
+ * Digest ::= OCTET STRING
+ */
+#define ASN1_HASH_MDX \
+( \
+ ASN1_STR_CONSTRUCTED_SEQUENCE "\x20" \
+ ASN1_STR_CONSTRUCTED_SEQUENCE "\x0C" \
+ ASN1_STR_OID "\x08" \
+ OID_DIGEST_ALG_MDX \
+ ASN1_STR_NULL "\x00" \
+ ASN1_STR_OCTET_STRING "\x10" \
+)
+
+#define ASN1_HASH_SHA1 \
+ ASN1_STR_CONSTRUCTED_SEQUENCE "\x21" \
+ ASN1_STR_CONSTRUCTED_SEQUENCE "\x09" \
+ ASN1_STR_OID "\x05" \
+ OID_HASH_ALG_SHA1 \
+ ASN1_STR_NULL "\x00" \
+ ASN1_STR_OCTET_STRING "\x14"
+
+#define ASN1_HASH_SHA2X \
+ ASN1_STR_CONSTRUCTED_SEQUENCE "\x11" \
+ ASN1_STR_CONSTRUCTED_SEQUENCE "\x0d" \
+ ASN1_STR_OID "\x09" \
+ OID_HASH_ALG_SHA2X \
+ ASN1_STR_NULL "\x00" \
+ ASN1_STR_OCTET_STRING "\x00"
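+/*
+ * Editor's note: the "\x11" and "\x00" length bytes above are
+ * placeholders that the signing code patches at run time with the
+ * actual digest length and hash OID suffix. Fully patched for SHA-256,
+ * for example, the DigestInfo prefix becomes the well-known sequence
+ *
+ *     30 31 30 0d 06 09 60 86 48 01 65 03 04 02 01 05 00 04 20
+ *
+ * followed by the 32-byte digest.
+ */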
+
+
+/**
+ * \brief RSA context structure
+ */
+typedef struct
+{
+ int ver; /*!< always 0 */
+ size_t len; /*!< size(N) in chars */
+
+ mpi N; /*!< public modulus */
+ mpi E; /*!< public exponent */
+
+ mpi D; /*!< private exponent */
+ mpi P; /*!< 1st prime factor */
+ mpi Q; /*!< 2nd prime factor */
+ mpi DP; /*!< D % (P - 1) */
+ mpi DQ; /*!< D % (Q - 1) */
+ mpi QP; /*!< 1 / (Q % P) */
+
+ mpi RN; /*!< cached R^2 mod N */
+ mpi RP; /*!< cached R^2 mod P */
+ mpi RQ; /*!< cached R^2 mod Q */
+
+ int padding; /*!< RSA_PKCS_V15 for 1.5 padding and
+ RSA_PKCS_V21 for OAEP/PSS */
+ int hash_id; /*!< Hash identifier of md_type_t as
+ specified in the md.h header file
+ for the EME-OAEP and EMSA-PSS
+ encoding */
+}
+rsa_context;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief Initialize an RSA context
+ *
+ * \param ctx RSA context to be initialized
+ * \param padding RSA_PKCS_V15 or RSA_PKCS_V21
+ * \param hash_id RSA_PKCS_V21 hash identifier
+ *
+ * \note The hash_id parameter is actually ignored
+ * when using RSA_PKCS_V15 padding.
+ */
+void rsa_init( rsa_context *ctx,
+ int padding,
+ int hash_id);
+
+/**
+ * \brief Generate an RSA keypair
+ *
+ * \param ctx RSA context that will hold the key
+ * \param f_rng RNG function
+ * \param p_rng RNG parameter
+ * \param nbits size of the public key in bits
+ * \param exponent public exponent (e.g., 65537)
+ *
+ * \note rsa_init() must be called beforehand to set up
+ * the RSA context.
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ */
+int rsa_gen_key( rsa_context *ctx,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng,
+ unsigned int nbits, int exponent );
+
+/**
+ * \brief Check a public RSA key
+ *
+ * \param ctx RSA context to be checked
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ */
+int rsa_check_pubkey( const rsa_context *ctx );
+
+/**
+ * \brief Check a private RSA key
+ *
+ * \param ctx RSA context to be checked
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ */
+int rsa_check_privkey( const rsa_context *ctx );
+
+/**
+ * \brief Do an RSA public key operation
+ *
+ * \param ctx RSA context
+ * \param input input buffer
+ * \param output output buffer
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ *
+ * \note This function does NOT take care of message
+ * padding. Also, be sure to set input[0] = 0 or ensure that
+ * input is smaller than N.
+ *
+ * \note The input and output buffers must be large
+ * enough (e.g. 128 bytes if RSA-1024 is used).
+ */
+int rsa_public( rsa_context *ctx,
+ const unsigned char *input,
+ unsigned char *output );
+
+/**
+ * \brief Do an RSA private key operation
+ *
+ * \param ctx RSA context
+ * \param input input buffer
+ * \param output output buffer
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ *
+ * \note The input and output buffers must be large
+ * enough (e.g. 128 bytes if RSA-1024 is used).
+ */
+int rsa_private( rsa_context *ctx,
+ const unsigned char *input,
+ unsigned char *output );
+
+/**
+ * \brief Add the message padding, then do an RSA operation
+ *
+ * \param ctx RSA context
+ * \param f_rng RNG function (Needed for padding and PKCS#1 v2.1 encoding)
+ * \param p_rng RNG parameter
+ * \param mode RSA_PUBLIC or RSA_PRIVATE
+ * \param ilen contains the plaintext length
+ * \param input buffer holding the data to be encrypted
+ * \param output buffer that will hold the ciphertext
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ *
+ * \note The output buffer must be as large as the size
+ * of ctx->N (e.g. 128 bytes if RSA-1024 is used).
+ */
+int rsa_pkcs1_encrypt( rsa_context *ctx,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng,
+ int mode, size_t ilen,
+ const unsigned char *input,
+ unsigned char *output );
+
+/**
+ * \brief Do an RSA operation, then remove the message padding
+ *
+ * \param ctx RSA context
+ * \param mode RSA_PUBLIC or RSA_PRIVATE
+ * \param olen will contain the plaintext length
+ * \param input buffer holding the encrypted data
+ * \param output buffer that will hold the plaintext
+ * \param output_max_len maximum length of the output buffer
+ *
+ * \return 0 if successful, or a POLARSSL_ERR_RSA_XXX error code
+ *
+ * \note The output buffer must be as large as the size
+ * of ctx->N (e.g. 128 bytes if RSA-1024 is used), otherwise
+ * an error is returned.
+ */
+int rsa_pkcs1_decrypt( rsa_context *ctx,
+ int mode, size_t *olen,
+ const unsigned char *input,
+ unsigned char *output,
+ size_t output_max_len );
+
+/**
+ * \brief Do a private RSA operation to sign a message digest
+ *
+ * \param ctx RSA context
+ * \param f_rng RNG function (Needed for PKCS#1 v2.1 encoding)
+ * \param p_rng RNG parameter
+ * \param mode RSA_PUBLIC or RSA_PRIVATE
+ * \param hash_id SIG_RSA_RAW, SIG_RSA_MD{2,4,5} or SIG_RSA_SHA{1,224,256,384,512}
+ * \param hashlen message digest length (for SIG_RSA_RAW only)
+ * \param hash buffer holding the message digest
+ * \param sig buffer that will hold the signature
+ *
+ * \return 0 if the signing operation was successful,
+ * or a POLARSSL_ERR_RSA_XXX error code
+ *
+ * \note The "sig" buffer must be as large as the size
+ * of ctx->N (e.g. 128 bytes if RSA-1024 is used).
+ *
+ * \note In case of PKCS#1 v2.1 encoding keep in mind that
+ * the hash_id in the RSA context is the one used for the
+ * encoding. hash_id in the function call is the type of hash
+ * that is encoded. According to RFC 3447 it is advised to
+ * keep both hashes the same.
+ */
+int rsa_pkcs1_sign( rsa_context *ctx,
+ int (*f_rng)(void *, unsigned char *, size_t),
+ void *p_rng,
+ int mode,
+ int hash_id,
+ unsigned int hashlen,
+ const unsigned char *hash,
+ unsigned char *sig );
+
+/**
+ * \brief Do a public RSA operation and check the message digest
+ *
+ * \param ctx points to an RSA public key
+ * \param mode RSA_PUBLIC or RSA_PRIVATE
+ * \param hash_id SIG_RSA_RAW, SIG_RSA_MD{2,4,5} or SIG_RSA_SHA{1,224,256,384,512}
+ * \param hashlen message digest length (for SIG_RSA_RAW only)
+ * \param hash buffer holding the message digest
+ * \param sig buffer holding the signature
+ *
+ * \return 0 if the verify operation was successful,
+ * or a POLARSSL_ERR_RSA_XXX error code
+ *
+ * \note The "sig" buffer must be as large as the size
+ * of ctx->N (e.g. 128 bytes if RSA-1024 is used).
+ *
+ * \note In case of PKCS#1 v2.1 encoding keep in mind that
+ * the hash_id in the RSA context is the one used for the
+ * verification. hash_id in the function call is the type of hash
+ * that is verified. According to RFC 3447 it is advised to
+ * keep both hashes the same.
+ */
+int rsa_pkcs1_verify( rsa_context *ctx,
+ int mode,
+ int hash_id,
+ unsigned int hashlen,
+ const unsigned char *hash,
+ unsigned char *sig );
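+/*
+ * Editor's sketch of a verify call (illustrative): check a PKCS#1 v1.5
+ * SHA-1 signature against a 20-byte digest. hashlen is only used for
+ * SIG_RSA_RAW, so 0 is passed here.
+ *
+ *     unsigned char hash[20];   // sha1 of the message, computed elsewhere
+ *     if( rsa_pkcs1_verify( &rsa, RSA_PUBLIC, SIG_RSA_SHA1,
+ *                           0, hash, sig ) != 0 )
+ *     {
+ *         // signature did not verify
+ *     }
+ */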
+
+/**
+ * \brief Free the components of an RSA key
+ *
+ * \param ctx RSA Context to free
+ */
+void rsa_free( rsa_context *ctx );
+
+/**
+ * \brief Checkup routine
+ *
+ * \return 0 if successful, or 1 if the test failed
+ */
+int rsa_self_test( int verbose );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* rsa.h */
diff --git a/drivers/misc/rsa/rsa_verify.c b/drivers/misc/rsa/rsa_verify.c
new file mode 100755
index 00000000..7d864016
--- /dev/null
+++ b/drivers/misc/rsa/rsa_verify.c
@@ -0,0 +1,572 @@
+/*
+ * The RSA public-key cryptosystem
+ *
+ * Copyright (C) 2006-2011, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+/*
+ * RSA was designed by Ron Rivest, Adi Shamir and Len Adleman.
+ *
+ * http://theory.lcs.mit.edu/~rivest/rsapaper.pdf
+ * http://www.cacr.math.uwaterloo.ca/hac/about/chap8.pdf
+ */
+
+#include "rsa.h"
+#include "base64.h"
+#include "pem.h"
+#include "asn1.h"
+#include "x509.h"
+//#include <stdlib.h>
+//#include <stdio.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <mach/hardware.h>
+#include <linux/delay.h>
+
+
+
+static const unsigned char base64_dec_map[128] =
+{
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 62, 127, 127, 127, 63, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 127, 127,
+ 127, 64, 127, 127, 127, 0, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 127, 127, 127, 127, 127, 127, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 127, 127, 127, 127, 127
+};
+
+/*
+ * Decode a base64-formatted buffer
+ */
+int base64_decode( unsigned char *dst, size_t *dlen,
+ const unsigned char *src, size_t slen )
+{
+ size_t i, j, n;
+ unsigned long x;
+ unsigned char *p;
+
+ for( i = j = n = 0; i < slen; i++ )
+ {
+ if( ( slen - i ) >= 2 &&
+ src[i] == '\r' && src[i + 1] == '\n' )
+ continue;
+
+ if( src[i] == '\n' )
+ continue;
+
+ if( src[i] == '=' && ++j > 2 )
+ return( POLARSSL_ERR_BASE64_INVALID_CHARACTER );
+
+ if( src[i] > 127 || base64_dec_map[src[i]] == 127 )
+ return( POLARSSL_ERR_BASE64_INVALID_CHARACTER );
+
+ if( base64_dec_map[src[i]] < 64 && j != 0 )
+ return( POLARSSL_ERR_BASE64_INVALID_CHARACTER );
+
+ n++;
+ }
+
+ if( n == 0 )
+ return( 0 );
+
+ n = ((n * 6) + 7) >> 3;
+
+ if( *dlen < n )
+ {
+ *dlen = n;
+ return( POLARSSL_ERR_BASE64_BUFFER_TOO_SMALL );
+ }
+
+ for( j = 3, n = x = 0, p = dst; i > 0; i--, src++ )
+ {
+ if( *src == '\r' || *src == '\n' )
+ continue;
+
+ j -= ( base64_dec_map[*src] == 64 );
+ x = (x << 6) | ( base64_dec_map[*src] & 0x3F );
+
+ if( ++n == 4 )
+ {
+ n = 0;
+ if( j > 0 ) *p++ = (unsigned char)( x >> 16 );
+ if( j > 1 ) *p++ = (unsigned char)( x >> 8 );
+ if( j > 2 ) *p++ = (unsigned char)( x );
+ }
+ }
+
+ *dlen = p - dst;
+
+ return( 0 );
+}
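+/*
+ * Editor's note: base64_decode() supports the two-pass pattern that
+ * pem_read_buffer() below relies on. The first call, with dst == NULL
+ * and *dlen == 0, fails with POLARSSL_ERR_BASE64_BUFFER_TOO_SMALL but
+ * leaves the required size in *dlen:
+ *
+ *     size_t n = 0;
+ *     base64_decode( NULL, &n, src, slen );   // n = decoded size needed
+ *     buf = vmalloc( n );
+ *     base64_decode( buf, &n, src, slen );    // actual decode
+ */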
+
+int pem_read_buffer( pem_context *ctx, const unsigned char *data, const unsigned char *pwd, size_t pwdlen, size_t *use_len )
+{
+ int ret;
+ size_t len;
+ unsigned char *buf;
+
+ ((void) pwd);
+ ((void) pwdlen);
+
+ if( ctx == NULL )
+ return( POLARSSL_ERR_PEM_INVALID_DATA );
+
+
+
+ len = 0;
+ ret = base64_decode( NULL, &len, data, strlen((char *)data) );
+
+ if( ret == POLARSSL_ERR_BASE64_INVALID_CHARACTER )
+ return( POLARSSL_ERR_PEM_INVALID_DATA + ret );
+
+ if( ( buf = (unsigned char *) vmalloc( len ) ) == NULL )
+ return( POLARSSL_ERR_PEM_MALLOC_FAILED );
+
+ if( ( ret = base64_decode( buf, &len, data, strlen((char *)data) ) ) != 0 )
+ {
+ vfree( buf );
+ return( POLARSSL_ERR_PEM_INVALID_DATA + ret );
+ }
+
+
+ ctx->buf = buf;
+ ctx->buflen = len;
+ //*use_len = s2 - data;
+
+ return( 0 );
+}
+
+int asn1_get_len( unsigned char **p,
+ const unsigned char *end,
+ size_t *len )
+{
+ if( ( end - *p ) < 1 )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ if( ( **p & 0x80 ) == 0 )
+ *len = *(*p)++;
+ else
+ {
+ switch( **p & 0x7F )
+ {
+ case 1:
+ if( ( end - *p ) < 2 )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ *len = (*p)[1];
+ (*p) += 2;
+ break;
+
+ case 2:
+ if( ( end - *p ) < 3 )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ *len = ( (*p)[1] << 8 ) | (*p)[2];
+ (*p) += 3;
+ break;
+
+ case 3:
+ if( ( end - *p ) < 4 )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ *len = ( (*p)[1] << 16 ) | ( (*p)[2] << 8 ) | (*p)[3];
+ (*p) += 4;
+ break;
+
+ case 4:
+ if( ( end - *p ) < 5 )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ *len = ( (*p)[1] << 24 ) | ( (*p)[2] << 16 ) | ( (*p)[3] << 8 ) | (*p)[4];
+ (*p) += 5;
+ break;
+
+ default:
+ return( POLARSSL_ERR_ASN1_INVALID_LENGTH );
+ }
+ }
+
+ if( *len > (size_t) ( end - *p ) )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ return( 0 );
+}
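+/*
+ * Editor's note: this is standard DER length decoding. Worked examples:
+ * a leading byte 0x45 is a short-form length of 69, while the bytes
+ * 0x82 0x01 0x00 are long-form (0x82 means two length octets follow)
+ * and decode to a length of 256.
+ */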
+
+int asn1_get_tag( unsigned char **p,
+ const unsigned char *end,
+ size_t *len, int tag )
+{
+ if( ( end - *p ) < 1 )
+ return( POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ if( **p != tag )
+ return( POLARSSL_ERR_ASN1_UNEXPECTED_TAG );
+
+ (*p)++;
+
+ return( asn1_get_len( p, end, len ) );
+}
+
+int asn1_get_mpi( unsigned char **p,
+ const unsigned char *end,
+ mpi *X )
+{
+ int ret;
+ size_t len;
+
+ if( ( ret = asn1_get_tag( p, end, &len, ASN1_INTEGER ) ) != 0 )
+ return( ret );
+
+ ret = mpi_read_binary( X, *p, len );
+
+ *p += len;
+
+ return( ret );
+}
+
+static int x509_get_alg( unsigned char **p,
+ const unsigned char *end,
+ x509_buf *alg )
+{
+ int ret;
+ size_t len;
+
+ if( ( ret = asn1_get_tag( p, end, &len,
+ ASN1_CONSTRUCTED | ASN1_SEQUENCE ) ) != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_ALG + ret );
+
+ end = *p + len;
+ alg->tag = **p;
+
+ if( ( ret = asn1_get_tag( p, end, &alg->len, ASN1_OID ) ) != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_ALG + ret );
+
+ alg->p = *p;
+ *p += alg->len;
+
+ if( *p == end )
+ return( 0 );
+
+ /*
+ * assume the algorithm parameters must be NULL
+ */
+ if( ( ret = asn1_get_tag( p, end, &len, ASN1_NULL ) ) != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_ALG + ret );
+
+ if( *p != end )
+ return( POLARSSL_ERR_X509_CERT_INVALID_ALG +
+ POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
+
+ return( 0 );
+}
+
+static int x509_get_pubkey( unsigned char **p,
+ const unsigned char *end,
+ x509_buf *pk_alg_oid,
+ mpi *N, mpi *E )
+{
+ int ret, can_handle;
+ size_t len;
+ unsigned char *end2;
+
+ if( ( ret = x509_get_alg( p, end, pk_alg_oid ) ) != 0 )
+ return( ret );
+
+ /*
+ * only RSA public keys handled at this time
+ */
+ can_handle = 0;
+
+ if( pk_alg_oid->len == 9 &&
+ memcmp( pk_alg_oid->p, OID_PKCS1_RSA, 9 ) == 0 )
+ can_handle = 1;
+
+ if( pk_alg_oid->len == 9 &&
+ memcmp( pk_alg_oid->p, OID_PKCS1, 8 ) == 0 )
+ {
+ if( pk_alg_oid->p[8] >= 2 && pk_alg_oid->p[8] <= 5 )
+ can_handle = 1;
+
+ if ( pk_alg_oid->p[8] >= 11 && pk_alg_oid->p[8] <= 14 )
+ can_handle = 1;
+ }
+
+ if( pk_alg_oid->len == 5 &&
+ memcmp( pk_alg_oid->p, OID_RSA_SHA_OBS, 5 ) == 0 )
+ can_handle = 1;
+
+ if( can_handle == 0 )
+ return( POLARSSL_ERR_X509_UNKNOWN_PK_ALG );
+
+ if( ( ret = asn1_get_tag( p, end, &len, ASN1_BIT_STRING ) ) != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY + ret );
+
+ if( ( end - *p ) < 1 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY +
+ POLARSSL_ERR_ASN1_OUT_OF_DATA );
+
+ end2 = *p + len;
+
+ if( *(*p)++ != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY );
+
+ /*
+ * RSAPublicKey ::= SEQUENCE {
+ * modulus INTEGER, -- n
+ * publicExponent INTEGER -- e
+ * }
+ */
+ if( ( ret = asn1_get_tag( p, end2, &len,
+ ASN1_CONSTRUCTED | ASN1_SEQUENCE ) ) != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY + ret );
+
+ if( *p + len != end2 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY +
+ POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
+
+ if( ( ret = asn1_get_mpi( p, end2, N ) ) != 0 ||
+ ( ret = asn1_get_mpi( p, end2, E ) ) != 0 )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY + ret );
+
+ if( *p != end )
+ return( POLARSSL_ERR_X509_CERT_INVALID_PUBKEY +
+ POLARSSL_ERR_ASN1_LENGTH_MISMATCH );
+
+ return( 0 );
+}
+
+
+void pem_free( pem_context *ctx )
+{
+ if( ctx->buf )
+ vfree( ctx->buf );
+
+ if( ctx->info )
+ vfree( ctx->info );
+
+ memset(ctx, 0, sizeof(pem_context));
+}
+
+int x509parse_public_key( rsa_context *rsa, const unsigned char *key, size_t keylen )
+{
+ int ret;
+ size_t len;
+ unsigned char *p, *end;
+ x509_buf alg_oid;
+ pem_context pem;
+
+ memset( &pem, 0, sizeof( pem_context ) );
+ ret = pem_read_buffer( &pem,
+ key, NULL, 0, &len );
+
+ if( ret == 0 )
+ {
+ /*
+ * Was PEM encoded
+ */
+ keylen = pem.buflen;
+
+ }
+ else if( ret != POLARSSL_ERR_PEM_NO_HEADER_PRESENT )
+ {
+ pem_free( &pem );
+ return( ret );
+ }
+
+ p = ( ret == 0 ) ? pem.buf : (unsigned char *) key;
+ end = p + keylen;
+
+ /*
+ * PublicKeyInfo ::= SEQUENCE {
+ * algorithm AlgorithmIdentifier,
+ * PublicKey BIT STRING
+ * }
+ *
+ * AlgorithmIdentifier ::= SEQUENCE {
+ * algorithm OBJECT IDENTIFIER,
+ * parameters ANY DEFINED BY algorithm OPTIONAL
+ * }
+ *
+ * RSAPublicKey ::= SEQUENCE {
+ * modulus INTEGER, -- n
+ * publicExponent INTEGER -- e
+ * }
+ */
+
+ if( ( ret = asn1_get_tag( &p, end, &len,
+ ASN1_CONSTRUCTED | ASN1_SEQUENCE ) ) != 0 )
+ {
+ pem_free( &pem );
+ rsa_free( rsa );
+ return( POLARSSL_ERR_X509_CERT_INVALID_FORMAT + ret );
+ }
+
+ if( ( ret = x509_get_pubkey( &p, end, &alg_oid, &rsa->N, &rsa->E ) ) != 0 )
+ {
+ pem_free( &pem );
+ rsa_free( rsa );
+ return( POLARSSL_ERR_X509_KEY_INVALID_FORMAT + ret );
+ }
+
+#if 0
+ if( ( ret = rsa_check_pubkey( rsa ) ) != 0 )
+ {
+#if defined(POLARSSL_PEM_C)
+ pem_free( &pem );
+#endif
+ rsa_free( rsa );
+ return( ret );
+ }
+#endif
+ rsa->len = mpi_size( &rsa->N );
+
+
+
+ pem_free( &pem );
+
+ return( 0 );
+}
+/*
+ * Initialize an RSA context
+ */
+void rsa_init( rsa_context *ctx, int padding, int hash_id)
+{
+ memset( ctx, 0, sizeof(rsa_context));
+
+ ctx->padding = padding;
+ ctx->hash_id = hash_id;
+}
+
+
+/*
+ * Do an RSA public key operation
+ */
+
+int rsa_public( rsa_context *ctx, const unsigned char *input, unsigned char *output)
+{
+ int ret;
+ size_t olen;
+ mpi T;
+
+ mpi_init( &T );
+
+ MPI_CHK( mpi_read_binary( &T, input, ctx->len ) );
+
+ if( mpi_cmp_mpi( &T, &ctx->N ) >= 0 ) {
+ mpi_free( &T );
+ return( POLARSSL_ERR_RSA_BAD_INPUT_DATA );
+ }
+
+ olen = ctx->len;
+ MPI_CHK( mpi_exp_mod( &T, &T, &ctx->E, &ctx->N, &ctx->RN ) );
+ MPI_CHK( mpi_write_binary( &T, output, olen ) );
+
+cleanup:
+
+ mpi_free( &T );
+
+ if( ret != 0 )
+ return( POLARSSL_ERR_RSA_PUBLIC_FAILED + ret );
+
+ return( 0 );
+}
+
+
+
+/*
+ * Free the components of an RSA key
+ */
+void rsa_free( rsa_context *ctx )
+{
+ mpi_free( &ctx->RQ ); mpi_free( &ctx->RP ); mpi_free( &ctx->RN );
+ mpi_free( &ctx->QP ); mpi_free( &ctx->DQ ); mpi_free( &ctx->DP );
+ mpi_free( &ctx->Q ); mpi_free( &ctx->P ); mpi_free( &ctx->D );
+ mpi_free( &ctx->E ); mpi_free( &ctx->N );
+}
+
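+/*
+ * Editor's sketch of a caller (names illustrative): pass a PEM public
+ * key and a raw PKCS#1 v1.5 signature block; on success out_buf
+ * receives the DigestInfo/hash bytes recovered from the signature.
+ *
+ *     u8 digest[256];
+ *     if (rsa_check((unsigned int)key_buf, key_len,
+ *                   (unsigned int)sig_buf, sig_len, digest) == 0) {
+ *         // compare digest against a locally computed hash
+ *     }
+ */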
+int rsa_check(unsigned int pub_key_addr, unsigned int pub_key_size, unsigned int sig_addr, unsigned int sig_size, u8 *out_buf)
+{
+
+ size_t len;
+ rsa_context rsa;
+ unsigned char key[1024];
+ unsigned char signature[1024];
+ unsigned char verified[1024];
+ int i;
+ unsigned char *tmp;
+ int ret, siglen;
+
+ /* reject inputs that would overflow the fixed-size local buffers */
+ if (pub_key_size >= sizeof(key) || sig_size >= sizeof(signature))
+ return -2;
+
+ rsa_init(&rsa, RSA_PKCS_V15, 0);
+
+ memcpy((void *)key, (void *)pub_key_addr, pub_key_size);
+ key[pub_key_size] = '\0';
+ len = pub_key_size;
+
+ ret = x509parse_public_key(&rsa, key, len);
+ if (ret) {
+ printk(KERN_ERR "rsa_check: cannot parse public key\n");
+ rsa_free(&rsa);
+ return -2;
+ }
+
+ memcpy(signature, (void *)sig_addr, sig_size);
+ signature[sig_size] = '\0';
+
+ ret = rsa_public(&rsa, signature, verified);
+
+ if (ret) {
+ printk(KERN_ERR "rsa_check: signature verify failed\n");
+ rsa_free(&rsa);
+ return -2;
+ }
+
+ /* a PKCS#1 v1.5 signature block must start with 0x00 0x01 */
+ if ((verified[0] != 0) || (verified[1] != RSA_SIGN)) {
+ printk(KERN_ERR "rsa_check: invalid signature block\n");
+ rsa_free(&rsa);
+ return -1;
+ }
+
+ tmp = &verified[2];
+ while (*tmp == 0xff) /* skip the 0xFF padding bytes */
+ tmp++;
+ tmp++; /* skip the 0x00 separator after the padding */
+
+ siglen = rsa.len - (tmp - verified);
+
+ for (i = 0; i < siglen; i++) {
+ out_buf[i] = tmp[i];
+ }
+
+ rsa_free(&rsa);
+ return 0;
+}
+EXPORT_SYMBOL(rsa_check);
\ No newline at end of file
diff --git a/drivers/misc/rsa/x509.h b/drivers/misc/rsa/x509.h
new file mode 100755
index 00000000..cd0dc84a
--- /dev/null
+++ b/drivers/misc/rsa/x509.h
@@ -0,0 +1,726 @@
+/**
+ * \file x509.h
+ *
+ * \brief X.509 certificate and private key decoding
+ *
+ * Copyright (C) 2006-2011, Brainspark B.V.
+ *
+ * This file is part of PolarSSL (http://www.polarssl.org)
+ * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef POLARSSL_X509_H
+#define POLARSSL_X509_H
+
+#include "asn1.h"
+#include "rsa.h"
+#include "dhm.h"
+
+/**
+ * \addtogroup x509_module
+ * \{
+ */
+
+/**
+ * \name X509 Error codes
+ * \{
+ */
+#define POLARSSL_ERR_X509_FEATURE_UNAVAILABLE -0x2080 /**< Unavailable feature, e.g. RSA hashing/encryption combination. */
+#define POLARSSL_ERR_X509_CERT_INVALID_PEM -0x2100 /**< The PEM-encoded certificate contains invalid elements, e.g. invalid character. */
+#define POLARSSL_ERR_X509_CERT_INVALID_FORMAT -0x2180 /**< The certificate format is invalid, e.g. different type expected. */
+#define POLARSSL_ERR_X509_CERT_INVALID_VERSION -0x2200 /**< The certificate version element is invalid. */
+#define POLARSSL_ERR_X509_CERT_INVALID_SERIAL -0x2280 /**< The serial tag or value is invalid. */
+#define POLARSSL_ERR_X509_CERT_INVALID_ALG -0x2300 /**< The algorithm tag or value is invalid. */
+#define POLARSSL_ERR_X509_CERT_INVALID_NAME -0x2380 /**< The name tag or value is invalid. */
+#define POLARSSL_ERR_X509_CERT_INVALID_DATE -0x2400 /**< The date tag or value is invalid. */
+#define POLARSSL_ERR_X509_CERT_INVALID_PUBKEY -0x2480 /**< The pubkey tag or value is invalid (only RSA is supported). */
+#define POLARSSL_ERR_X509_CERT_INVALID_SIGNATURE -0x2500 /**< The signature tag or value is invalid. */
+#define POLARSSL_ERR_X509_CERT_INVALID_EXTENSIONS -0x2580 /**< The extension tag or value is invalid. */
+#define POLARSSL_ERR_X509_CERT_UNKNOWN_VERSION -0x2600 /**< Certificate or CRL has an unsupported version number. */
+#define POLARSSL_ERR_X509_CERT_UNKNOWN_SIG_ALG -0x2680 /**< Signature algorithm (oid) is unsupported. */
+#define POLARSSL_ERR_X509_UNKNOWN_PK_ALG -0x2700 /**< Key algorithm is unsupported (only RSA is supported). */
+#define POLARSSL_ERR_X509_CERT_SIG_MISMATCH -0x2780 /**< Certificate signature algorithms do not match. (see \c ::x509_cert sig_oid) */
+#define POLARSSL_ERR_X509_CERT_VERIFY_FAILED -0x2800 /**< Certificate verification failed, e.g. CRL, CA or signature check failed. */
+#define POLARSSL_ERR_X509_KEY_INVALID_VERSION -0x2880 /**< Unsupported RSA key version */
+#define POLARSSL_ERR_X509_KEY_INVALID_FORMAT -0x2900 /**< Invalid RSA key tag or value. */
+#define POLARSSL_ERR_X509_CERT_UNKNOWN_FORMAT -0x2980 /**< Format not recognized as DER or PEM. */
+#define POLARSSL_ERR_X509_INVALID_INPUT -0x2A00 /**< Input invalid. */
+#define POLARSSL_ERR_X509_MALLOC_FAILED -0x2A80 /**< Allocation of memory failed. */
+#define POLARSSL_ERR_X509_FILE_IO_ERROR -0x2B00 /**< Read/write of file failed. */
+/* \} name */
+
+
+/**
+ * \name X509 Verify codes
+ * \{
+ */
+#define BADCERT_EXPIRED 0x01 /**< The certificate validity has expired. */
+#define BADCERT_REVOKED 0x02 /**< The certificate has been revoked (is on a CRL). */
+#define BADCERT_CN_MISMATCH 0x04 /**< The certificate Common Name (CN) does not match with the expected CN. */
+#define BADCERT_NOT_TRUSTED 0x08 /**< The certificate is not correctly signed by the trusted CA. */
+#define BADCRL_NOT_TRUSTED 0x10 /**< CRL is not correctly signed by the trusted CA. */
+#define BADCRL_EXPIRED 0x20 /**< CRL is expired. */
+#define BADCERT_MISSING 0x40 /**< Certificate was missing. */
+#define BADCERT_SKIP_VERIFY 0x80 /**< Certificate verification was skipped. */
+/* \} name */
+/* \} addtogroup x509_module */
+
+/*
+ * various object identifiers
+ */
+#define X520_COMMON_NAME 3
+#define X520_COUNTRY 6
+#define X520_LOCALITY 7
+#define X520_STATE 8
+#define X520_ORGANIZATION 10
+#define X520_ORG_UNIT 11
+#define PKCS9_EMAIL 1
+
+#define X509_OUTPUT_DER 0x01
+#define X509_OUTPUT_PEM 0x02
+#define PEM_LINE_LENGTH 72
+#define X509_ISSUER 0x01
+#define X509_SUBJECT 0x02
+
+#define OID_X520 "\x55\x04"
+#define OID_CN OID_X520 "\x03"
+
+#define OID_PKCS1 "\x2A\x86\x48\x86\xF7\x0D\x01\x01"
+#define OID_PKCS1_RSA OID_PKCS1 "\x01"
+
+#define OID_RSA_SHA_OBS "\x2B\x0E\x03\x02\x1D"
+
+#define OID_PKCS9 "\x2A\x86\x48\x86\xF7\x0D\x01\x09"
+#define OID_PKCS9_EMAIL OID_PKCS9 "\x01"
+
+/** ISO arc for standard certificate and CRL extensions */
+#define OID_ID_CE "\x55\x1D" /**< id-ce OBJECT IDENTIFIER ::= {joint-iso-ccitt(2) ds(5) 29} */
+
+/**
+ * Private Internet Extensions
+ * { iso(1) identified-organization(3) dod(6) internet(1)
+ * security(5) mechanisms(5) pkix(7) }
+ */
+#define OID_PKIX "\x2B\x06\x01\x05\x05\x07"
+
+/*
+ * OIDs for standard certificate extensions
+ */
+#define OID_AUTHORITY_KEY_IDENTIFIER OID_ID_CE "\x23" /**< id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 } */
+#define OID_SUBJECT_KEY_IDENTIFIER OID_ID_CE "\x0E" /**< id-ce-subjectKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 14 } */
+#define OID_KEY_USAGE OID_ID_CE "\x0F" /**< id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 } */
+#define OID_CERTIFICATE_POLICIES OID_ID_CE "\x20" /**< id-ce-certificatePolicies OBJECT IDENTIFIER ::= { id-ce 32 } */
+#define OID_POLICY_MAPPINGS OID_ID_CE "\x21" /**< id-ce-policyMappings OBJECT IDENTIFIER ::= { id-ce 33 } */
+#define OID_SUBJECT_ALT_NAME OID_ID_CE "\x11" /**< id-ce-subjectAltName OBJECT IDENTIFIER ::= { id-ce 17 } */
+#define OID_ISSUER_ALT_NAME OID_ID_CE "\x12" /**< id-ce-issuerAltName OBJECT IDENTIFIER ::= { id-ce 18 } */
+#define OID_SUBJECT_DIRECTORY_ATTRS OID_ID_CE "\x09" /**< id-ce-subjectDirectoryAttributes OBJECT IDENTIFIER ::= { id-ce 9 } */
+#define OID_BASIC_CONSTRAINTS OID_ID_CE "\x13" /**< id-ce-basicConstraints OBJECT IDENTIFIER ::= { id-ce 19 } */
+#define OID_NAME_CONSTRAINTS OID_ID_CE "\x1E" /**< id-ce-nameConstraints OBJECT IDENTIFIER ::= { id-ce 30 } */
+#define OID_POLICY_CONSTRAINTS OID_ID_CE "\x24" /**< id-ce-policyConstraints OBJECT IDENTIFIER ::= { id-ce 36 } */
+#define OID_EXTENDED_KEY_USAGE OID_ID_CE "\x25" /**< id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } */
+#define OID_CRL_DISTRIBUTION_POINTS OID_ID_CE "\x1F" /**< id-ce-cRLDistributionPoints OBJECT IDENTIFIER ::= { id-ce 31 } */
+#define OID_INIHIBIT_ANYPOLICY OID_ID_CE "\x36" /**< id-ce-inhibitAnyPolicy OBJECT IDENTIFIER ::= { id-ce 54 } */
+#define OID_FRESHEST_CRL OID_ID_CE "\x2E" /**< id-ce-freshestCRL OBJECT IDENTIFIER ::= { id-ce 46 } */
+
+/*
+ * X.509 v3 Key Usage Extension flags
+ */
+#define KU_DIGITAL_SIGNATURE (0x80) /* bit 0 */
+#define KU_NON_REPUDIATION (0x40) /* bit 1 */
+#define KU_KEY_ENCIPHERMENT (0x20) /* bit 2 */
+#define KU_DATA_ENCIPHERMENT (0x10) /* bit 3 */
+#define KU_KEY_AGREEMENT (0x08) /* bit 4 */
+#define KU_KEY_CERT_SIGN (0x04) /* bit 5 */
+#define KU_CRL_SIGN (0x02) /* bit 6 */
+
+/*
+ * X.509 v3 Extended key usage OIDs
+ */
+#define OID_ANY_EXTENDED_KEY_USAGE OID_EXTENDED_KEY_USAGE "\x00" /**< anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } */
+
+#define OID_KP OID_PKIX "\x03" /**< id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } */
+#define OID_SERVER_AUTH OID_KP "\x01" /**< id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } */
+#define OID_CLIENT_AUTH OID_KP "\x02" /**< id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } */
+#define OID_CODE_SIGNING OID_KP "\x03" /**< id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } */
+#define OID_EMAIL_PROTECTION OID_KP "\x04" /**< id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } */
+#define OID_TIME_STAMPING OID_KP "\x08" /**< id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } */
+#define OID_OCSP_SIGNING OID_KP "\x09" /**< id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } */
+
+#define STRING_SERVER_AUTH "TLS Web Server Authentication"
+#define STRING_CLIENT_AUTH "TLS Web Client Authentication"
+#define STRING_CODE_SIGNING "Code Signing"
+#define STRING_EMAIL_PROTECTION "E-mail Protection"
+#define STRING_TIME_STAMPING "Time Stamping"
+#define STRING_OCSP_SIGNING "OCSP Signing"
+
+/*
+ * OIDs for CRL extensions
+ */
+#define OID_PRIVATE_KEY_USAGE_PERIOD OID_ID_CE "\x10"
+#define OID_CRL_NUMBER OID_ID_CE "\x14" /**< id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } */
+
+/*
+ * Netscape certificate extensions
+ */
+#define OID_NETSCAPE "\x60\x86\x48\x01\x86\xF8\x42" /**< Netscape OID */
+#define OID_NS_CERT OID_NETSCAPE "\x01"
+#define OID_NS_CERT_TYPE OID_NS_CERT "\x01"
+#define OID_NS_BASE_URL OID_NS_CERT "\x02"
+#define OID_NS_REVOCATION_URL OID_NS_CERT "\x03"
+#define OID_NS_CA_REVOCATION_URL OID_NS_CERT "\x04"
+#define OID_NS_RENEWAL_URL OID_NS_CERT "\x07"
+#define OID_NS_CA_POLICY_URL OID_NS_CERT "\x08"
+#define OID_NS_SSL_SERVER_NAME OID_NS_CERT "\x0C"
+#define OID_NS_COMMENT OID_NS_CERT "\x0D"
+#define OID_NS_DATA_TYPE OID_NETSCAPE "\x02"
+#define OID_NS_CERT_SEQUENCE OID_NS_DATA_TYPE "\x05"
+
+/*
+ * Netscape certificate types
+ * (http://www.mozilla.org/projects/security/pki/nss/tech-notes/tn3.html)
+ */
+
+#define NS_CERT_TYPE_SSL_CLIENT (0x80) /* bit 0 */
+#define NS_CERT_TYPE_SSL_SERVER (0x40) /* bit 1 */
+#define NS_CERT_TYPE_EMAIL (0x20) /* bit 2 */
+#define NS_CERT_TYPE_OBJECT_SIGNING (0x10) /* bit 3 */
+#define NS_CERT_TYPE_RESERVED (0x08) /* bit 4 */
+#define NS_CERT_TYPE_SSL_CA (0x04) /* bit 5 */
+#define NS_CERT_TYPE_EMAIL_CA (0x02) /* bit 6 */
+#define NS_CERT_TYPE_OBJECT_SIGNING_CA (0x01) /* bit 7 */
+
+#define EXT_AUTHORITY_KEY_IDENTIFIER (1 << 0)
+#define EXT_SUBJECT_KEY_IDENTIFIER (1 << 1)
+#define EXT_KEY_USAGE (1 << 2)
+#define EXT_CERTIFICATE_POLICIES (1 << 3)
+#define EXT_POLICY_MAPPINGS (1 << 4)
+#define EXT_SUBJECT_ALT_NAME (1 << 5)
+#define EXT_ISSUER_ALT_NAME (1 << 6)
+#define EXT_SUBJECT_DIRECTORY_ATTRS (1 << 7)
+#define EXT_BASIC_CONSTRAINTS (1 << 8)
+#define EXT_NAME_CONSTRAINTS (1 << 9)
+#define EXT_POLICY_CONSTRAINTS (1 << 10)
+#define EXT_EXTENDED_KEY_USAGE (1 << 11)
+#define EXT_CRL_DISTRIBUTION_POINTS (1 << 12)
+#define EXT_INIHIBIT_ANYPOLICY (1 << 13)
+#define EXT_FRESHEST_CRL (1 << 14)
+
+#define EXT_NS_CERT_TYPE (1 << 16)
+
+/*
+ * Storage format identifiers
+ * Recognized formats: PEM and DER
+ */
+#define X509_FORMAT_DER 1
+#define X509_FORMAT_PEM 2
+
+/**
+ * \addtogroup x509_module
+ * \{ */
+
+/**
+ * \name Structures for parsing X.509 certificates and CRLs
+ * \{
+ */
+
+/**
+ * Type-length-value structure that allows for ASN1 using DER.
+ */
+typedef asn1_buf x509_buf;
+
+/**
+ * Container for ASN1 bit strings.
+ */
+typedef asn1_bitstring x509_bitstring;
+
+/**
+ * Container for ASN1 named information objects.
+ * It allows for Relative Distinguished Names (e.g. cn=polarssl,ou=code,etc.).
+ */
+typedef struct _x509_name
+{
+ x509_buf oid; /**< The object identifier. */
+ x509_buf val; /**< The named value. */
+ struct _x509_name *next; /**< The next named information object. */
+}
+x509_name;
+
+/**
+ * Container for a sequence of ASN.1 items
+ */
+typedef asn1_sequence x509_sequence;
+
+/** Container for date and time (precision in seconds). */
+typedef struct _x509_time
+{
+ int year, mon, day; /**< Date. */
+ int hour, min, sec; /**< Time. */
+}
+x509_time;
+
+/**
+ * Container for an X.509 certificate. The certificate may be chained.
+ */
+typedef struct _x509_cert
+{
+ x509_buf raw; /**< The raw certificate data (DER). */
+ x509_buf tbs; /**< The raw certificate body (DER). The part that is To Be Signed. */
+
+ int version; /**< The X.509 version. (0=v1, 1=v2, 2=v3) */
+ x509_buf serial; /**< Unique id for certificate issued by a specific CA. */
+ x509_buf sig_oid1; /**< Signature algorithm, e.g. sha1RSA */
+
+ x509_buf issuer_raw; /**< The raw issuer data (DER). Used for quick comparison. */
+ x509_buf subject_raw; /**< The raw subject data (DER). Used for quick comparison. */
+
+ x509_name issuer; /**< The parsed issuer data (named information object). */
+ x509_name subject; /**< The parsed subject data (named information object). */
+
+ x509_time valid_from; /**< Start time of certificate validity. */
+ x509_time valid_to; /**< End time of certificate validity. */
+
+ x509_buf pk_oid; /**< Subject public key info. Includes the public key algorithm and the key itself. */
+ rsa_context rsa; /**< Container for the RSA context. Only RSA is supported for public keys at this time. */
+
+ x509_buf issuer_id; /**< Optional X.509 v2/v3 issuer unique identifier. */
+ x509_buf subject_id; /**< Optional X.509 v2/v3 subject unique identifier. */
+ x509_buf v3_ext; /*!< Optional X.509 v3 extensions. Only Basic Constraints are supported at this time. */
+
+ int ext_types; /**< Bit string containing detected and parsed extensions */
+ int ca_istrue; /**< Optional Basic Constraint extension value: 1 if this certificate belongs to a CA, 0 otherwise. */
+ int max_pathlen; /**< Optional Basic Constraint extension value: The maximum path length to the root certificate. */
+
+ unsigned char key_usage; /**< Optional key usage extension value: See the values below */
+
+ x509_sequence ext_key_usage; /**< Optional list of extended key usage OIDs. */
+
+ unsigned char ns_cert_type; /**< Optional Netscape certificate type extension value: See the values below */
+
+ x509_buf sig_oid2; /**< Signature algorithm. Must match sig_oid1. */
+ x509_buf sig; /**< Signature: hash of the tbs part signed with the private key. */
+ int sig_alg; /**< Internal representation of the signature algorithm, e.g. SIG_RSA_MD2 */
+
+ struct _x509_cert *next; /**< Next certificate in the CA-chain. */
+}
+x509_cert;
+
+/**
+ * Certificate revocation list entry.
+ * Contains the CA-specific serial numbers and revocation dates.
+ */
+typedef struct _x509_crl_entry
+{
+ x509_buf raw;
+
+ x509_buf serial;
+
+ x509_time revocation_date;
+
+ x509_buf entry_ext;
+
+ struct _x509_crl_entry *next;
+}
+x509_crl_entry;
+
+/**
+ * Certificate revocation list structure.
+ * Every CRL may have multiple entries.
+ */
+typedef struct _x509_crl
+{
+ x509_buf raw; /**< The raw certificate data (DER). */
+ x509_buf tbs; /**< The raw certificate body (DER). The part that is To Be Signed. */
+
+ int version;
+ x509_buf sig_oid1;
+
+ x509_buf issuer_raw; /**< The raw issuer data (DER). */
+
+ x509_name issuer; /**< The parsed issuer data (named information object). */
+
+ x509_time this_update;
+ x509_time next_update;
+
+ x509_crl_entry entry; /**< The CRL entries containing the certificate revocation times for this CA. */
+
+ x509_buf crl_ext;
+
+ x509_buf sig_oid2;
+ x509_buf sig;
+ int sig_alg;
+
+ struct _x509_crl *next;
+}
+x509_crl;
+/** \} name Structures for parsing X.509 certificates and CRLs */
+/** \} addtogroup x509_module */
+
+/**
+ * \name Structures for writing X.509 certificates.
+ * XvP: commented out as they are not used.
+ * - <tt>typedef struct _x509_node x509_node;</tt>
+ * - <tt>typedef struct _x509_raw x509_raw;</tt>
+ */
+/*
+typedef struct _x509_node
+{
+ unsigned char *data;
+ unsigned char *p;
+ unsigned char *end;
+
+ size_t len;
+}
+x509_node;
+
+typedef struct _x509_raw
+{
+ x509_node raw;
+ x509_node tbs;
+
+ x509_node version;
+ x509_node serial;
+ x509_node tbs_signalg;
+ x509_node issuer;
+ x509_node validity;
+ x509_node subject;
+ x509_node subpubkey;
+
+ x509_node signalg;
+ x509_node sign;
+}
+x509_raw;
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \name Functions to read in DHM parameters, a certificate, CRL or private RSA key
+ * \{
+ */
+
+/** \ingroup x509_module */
+/**
+ * \brief Parse one or more certificates and add them
+ * to the chained list. Parses permissively: if some
+ * certificates can be parsed, the result is the number
+ * of certificates that failed to parse. If none parse
+ * correctly, the first error is returned.
+ *
+ * \param chain points to the start of the chain
+ * \param buf buffer holding the certificate data
+ * \param buflen size of the buffer
+ *
+ * \return 0 if all certificates parsed successfully, a positive number
+ * if partly successful or a specific X509 or PEM error code
+ */
+int x509parse_crt( x509_cert *chain, const unsigned char *buf, size_t buflen );
+
+/** \ingroup x509_module */
+/**
+ * \brief Load one or more certificates and add them
+ * to the chained list. Parses permissively: if some
+ * certificates can be parsed, the result is the number
+ * of certificates that failed to parse. If none parse
+ * correctly, the first error is returned.
+ *
+ * \param chain points to the start of the chain
+ * \param path filename to read the certificates from
+ *
+ * \return 0 if all certificates parsed successfully, a positive number
+ * if partly successful or a specific X509 or PEM error code
+ */
+int x509parse_crtfile( x509_cert *chain, const char *path );
+
+/** \ingroup x509_module */
+/**
+ * \brief Parse one or more CRLs and add them
+ * to the chained list
+ *
+ * \param chain points to the start of the chain
+ * \param buf buffer holding the CRL data
+ * \param buflen size of the buffer
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_crl( x509_crl *chain, const unsigned char *buf, size_t buflen );
+
+/** \ingroup x509_module */
+/**
+ * \brief Load one or more CRLs and add them
+ * to the chained list
+ *
+ * \param chain points to the start of the chain
+ * \param path filename to read the CRLs from
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_crlfile( x509_crl *chain, const char *path );
+
+/** \ingroup x509_module */
+/**
+ * \brief Parse a private RSA key
+ *
+ * \param rsa RSA context to be initialized
+ * \param key input buffer
+ * \param keylen size of the buffer
+ * \param pwd password for decryption (optional)
+ * \param pwdlen size of the password
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_key( rsa_context *rsa,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *pwd, size_t pwdlen );
+
+/** \ingroup x509_module */
+/**
+ * \brief Load and parse a private RSA key
+ *
+ * \param rsa RSA context to be initialized
+ * \param path filename to read the private key from
+ * \param password password to decrypt the file (can be NULL)
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_keyfile( rsa_context *rsa, const char *path,
+ const char *password );
+
+/** \ingroup x509_module */
+/**
+ * \brief Parse a public RSA key
+ *
+ * \param rsa RSA context to be initialized
+ * \param key input buffer
+ * \param keylen size of the buffer
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_public_key( rsa_context *rsa,
+ const unsigned char *key, size_t keylen );
+
+/** \ingroup x509_module */
+/**
+ * \brief Load and parse a public RSA key
+ *
+ * \param rsa RSA context to be initialized
+ * \param path filename to read the public key from
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_public_keyfile( rsa_context *rsa, const char *path );
+
+/** \ingroup x509_module */
+/**
+ * \brief Parse DHM parameters
+ *
+ * \param dhm DHM context to be initialized
+ * \param dhmin input buffer
+ * \param dhminlen size of the buffer
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_dhm( dhm_context *dhm, const unsigned char *dhmin, size_t dhminlen );
+
+/** \ingroup x509_module */
+/**
+ * \brief Load and parse DHM parameters
+ *
+ * \param dhm DHM context to be initialized
+ * \param path filename to read the DHM Parameters from
+ *
+ * \return 0 if successful, or a specific X509 or PEM error code
+ */
+int x509parse_dhmfile( dhm_context *dhm, const char *path );
+
+/** \} name Functions to read in DHM parameters, a certificate, CRL or private RSA key */
+
+/**
+ * \brief Store the certificate DN in printable form into buf;
+ * no more than size characters will be written.
+ *
+ * \param buf Buffer to write to
+ * \param size Maximum size of buffer
+ * \param dn The X509 name to represent
+ *
+ * \return The amount of data written to the buffer, or -1 in
+ * case of an error.
+ */
+int x509parse_dn_gets( char *buf, size_t size, const x509_name *dn );
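+/*
+ * Illustrative usage sketch (editor's note); assumes crt is a parsed
+ * x509_cert whose subject member is an x509_name:
+ *
+ *     char dn[512];
+ *
+ *     if( x509parse_dn_gets( dn, sizeof( dn ), &crt->subject ) != -1 )
+ *         printf( "subject: %s\n", dn );
+ */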
+
+/**
+ * \brief Store the certificate serial in printable form into buf;
+ * no more than size characters will be written.
+ *
+ * \param buf Buffer to write to
+ * \param size Maximum size of buffer
+ * \param serial The X509 serial to represent
+ *
+ * \return The amount of data written to the buffer, or -1 in
+ * case of an error.
+ */
+int x509parse_serial_gets( char *buf, size_t size, const x509_buf *serial );
+
+/**
+ * \brief Returns an informational string about the
+ * certificate.
+ *
+ * \param buf Buffer to write to
+ * \param size Maximum size of buffer
+ * \param prefix A line prefix
+ * \param crt The X509 certificate to represent
+ *
+ * \return The amount of data written to the buffer, or -1 in
+ * case of an error.
+ */
+int x509parse_cert_info( char *buf, size_t size, const char *prefix,
+ const x509_cert *crt );
+
+/**
+ * \brief Returns an informational string about the
+ * CRL.
+ *
+ * \param buf Buffer to write to
+ * \param size Maximum size of buffer
+ * \param prefix A line prefix
+ * \param crl The X509 CRL to represent
+ *
+ * \return The amount of data written to the buffer, or -1 in
+ * case of an error.
+ */
+int x509parse_crl_info( char *buf, size_t size, const char *prefix,
+ const x509_crl *crl );
+
+/**
+ * \brief Given a known OID, return its descriptive string.
+ *
+ * \param oid buffer containing the oid
+ *
+ * \return Return a string if the OID is known,
+ * or NULL otherwise.
+ */
+const char *x509_oid_get_description( x509_buf *oid );
+
+/**
+ * \brief Given an OID, return a string version of its OID number.
+ *
+ * \param buf Buffer to write to
+ * \param size Maximum size of buffer
+ * \param oid Buffer containing the OID
+ *
+ * \return The amount of data written to the buffer, or -1 in
+ * case of an error.
+ */
+int x509_oid_get_numeric_string( char *buf, size_t size, x509_buf *oid );
+
+/**
+ * \brief Check a given x509_time against the system time to
+ * determine whether it is still valid.
+ *
+ * \param time x509_time to check
+ *
+ * \return Return 0 if the x509_time is still valid,
+ * or 1 otherwise.
+ */
+int x509parse_time_expired( const x509_time *time );
+
+/**
+ * \name Functions to verify a certificate
+ * \{
+ */
+/** \ingroup x509_module */
+/**
+ * \brief Verify the certificate signature
+ *
+ * \param crt a certificate to be verified
+ * \param trust_ca the trusted CA chain
+ * \param ca_crl the CRL chain for trusted CA's
+ * \param cn expected Common Name (can be set to
+ * NULL if the CN must not be verified)
+ * \param flags result of the verification
+ * \param f_vrfy verification function
+ * \param p_vrfy verification parameter
+ *
+ * \return 0 if successful or POLARSSL_ERR_X509_SIG_VERIFY_FAILED,
+ * in which case *flags will have one or more of
+ * the following values set:
+ * BADCERT_EXPIRED,
+ * BADCERT_REVOKED,
+ * BADCERT_CN_MISMATCH,
+ * BADCERT_NOT_TRUSTED
+ *
+ * \note TODO: add two arguments, depth and crl
+ */
+int x509parse_verify( x509_cert *crt,
+ x509_cert *trust_ca,
+ x509_crl *ca_crl,
+ const char *cn, int *flags,
+ int (*f_vrfy)(void *, x509_cert *, int, int),
+ void *p_vrfy );
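+/*
+ * Illustrative usage sketch (editor's note). ca_chain and crl are
+ * hypothetical, previously parsed objects:
+ *
+ *     int ret, flags;
+ *
+ *     ret = x509parse_verify( &crt, &ca_chain, &crl,
+ *                             "www.example.com", &flags, NULL, NULL );
+ *     if( ret != 0 )
+ *     {
+ *         if( flags & BADCERT_EXPIRED )
+ *             ; // certificate has expired
+ *         if( flags & BADCERT_NOT_TRUSTED )
+ *             ; // not signed by a trusted CA
+ *     }
+ */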
+
+/**
+ * \brief Check if a certificate is revoked by the given CRL
+ *
+ * \param crt a certificate to be verified
+ * \param crl the CRL to verify against
+ *
+ * \return 1 if the certificate is revoked, 0 otherwise
+ *
+ */
+int x509parse_revoked( const x509_cert *crt, const x509_crl *crl );
+
+/** \} name Functions to verify a certificate */
+
+
+
+/**
+ * \name Functions to clear a certificate, CRL or private RSA key
+ * \{
+ */
+/** \ingroup x509_module */
+/**
+ * \brief Free all certificate data
+ *
+ * \param crt Certificate chain to free
+ */
+void x509_free( x509_cert *crt );
+
+/** \ingroup x509_module */
+/**
+ * \brief Free all CRL data
+ *
+ * \param crl CRL chain to free
+ */
+void x509_crl_free( x509_crl *crl );
+
+/** \} name Functions to clear a certificate, CRL or private RSA key */
+
+
+/**
+ * \brief Checkup routine
+ *
+ * \return 0 if successful, or 1 if the test failed
+ */
+int x509_self_test( int verbose );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* x509.h */
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
new file mode 100644
index 00000000..0003a1d5
--- /dev/null
+++ b/drivers/misc/sgi-gru/Makefile
@@ -0,0 +1,5 @@
+ccflags-$(CONFIG_SGI_GRU_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_SGI_GRU) := gru.o
+gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o
+
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
new file mode 100644
index 00000000..3ad76cd1
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __GRU_H__
+#define __GRU_H__
+
+/*
+ * GRU architectural definitions
+ */
+#define GRU_CACHE_LINE_BYTES 64
+#define GRU_HANDLE_STRIDE 256
+#define GRU_CB_BASE 0
+#define GRU_DS_BASE 0x20000
+
+/*
+ * Size used to map GRU GSeg
+ */
+#if defined(CONFIG_IA64)
+#define GRU_GSEG_PAGESIZE (256 * 1024UL)
+#elif defined(CONFIG_X86_64)
+#define GRU_GSEG_PAGESIZE (256 * 1024UL) /* ZZZ 2MB ??? */
+#else
+#error "Unsupported architecture"
+#endif
+
+/*
+ * Structure for obtaining GRU resource information
+ */
+struct gru_chiplet_info {
+ int node;
+ int chiplet;
+ int blade;
+ int total_dsr_bytes;
+ int total_cbr;
+ int total_user_dsr_bytes;
+ int total_user_cbr;
+ int free_user_dsr_bytes;
+ int free_user_cbr;
+};
+
+/*
+ * Statistics kept for each context.
+ */
+struct gru_gseg_statistics {
+ unsigned long fmm_tlbmiss;
+ unsigned long upm_tlbmiss;
+ unsigned long tlbdropin;
+ unsigned long context_stolen;
+ unsigned long reserved[10];
+};
+
+/* Flags for GRU options on the gru_create_context() call */
+/* Select one of the following 4 options to specify how TLB misses are handled */
+#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */
+#define GRU_OPT_MISS_USER_POLL 0x0001 /* User will poll CB for faults */
+#define GRU_OPT_MISS_FMM_INTR 0x0002 /* Send interrupt to cpu to
+ handle fault */
+#define GRU_OPT_MISS_FMM_POLL 0x0003 /* Use system polling thread */
+#define GRU_OPT_MISS_MASK 0x0003 /* Mask for TLB MISS option */
+
+
+
+#endif /* __GRU_H__ */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
new file mode 100644
index 00000000..04d5170a
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -0,0 +1,736 @@
+/*
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __GRU_INSTRUCTIONS_H__
+#define __GRU_INSTRUCTIONS_H__
+
+extern int gru_check_status_proc(void *cb);
+extern int gru_wait_proc(void *cb);
+extern void gru_wait_abort_proc(void *cb);
+
+
+
+/*
+ * Architecture dependent functions
+ */
+
+#if defined(CONFIG_IA64)
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
+#define __flush_cache(p) ia64_fc((unsigned long)p)
+/* Use volatile on IA64 to ensure ordering via st4.rel */
+#define gru_ordered_store_ulong(p, v) \
+ do { \
+ barrier(); \
+ *((volatile unsigned long *)(p)) = v; /* force st.rel */ \
+ } while (0)
+#elif defined(CONFIG_X86_64)
+#include <asm/cacheflush.h>
+#define __flush_cache(p) clflush(p)
+#define gru_ordered_store_ulong(p, v) \
+ do { \
+ barrier(); \
+ *(unsigned long *)p = v; \
+ } while (0)
+#else
+#error "Unsupported architecture"
+#endif
+
+/*
+ * Control block status and exception codes
+ */
+#define CBS_IDLE 0
+#define CBS_EXCEPTION 1
+#define CBS_ACTIVE 2
+#define CBS_CALL_OS 3
+
+/* CB substatus bitmasks */
+#define CBSS_MSG_QUEUE_MASK 7
+#define CBSS_IMPLICIT_ABORT_ACTIVE_MASK 8
+
+/* CB substatus message queue values (low 3 bits of substatus) */
+#define CBSS_NO_ERROR 0
+#define CBSS_LB_OVERFLOWED 1
+#define CBSS_QLIMIT_REACHED 2
+#define CBSS_PAGE_OVERFLOW 3
+#define CBSS_AMO_NACKED 4
+#define CBSS_PUT_NACKED 5
+
+/*
+ * Structure used to fetch exception detail for CBs that terminate with
+ * CBS_EXCEPTION
+ */
+struct control_block_extended_exc_detail {
+ unsigned long cb;
+ int opc;
+ int ecause;
+ int exopc;
+ long exceptdet0;
+ int exceptdet1;
+ int cbrstate;
+ int cbrexecstatus;
+};
+
+/*
+ * Instruction formats
+ */
+
+/*
+ * Generic instruction format.
+ * This definition has precise bit field definitions.
+ */
+struct gru_instruction_bits {
+ /* DW 0 - low */
+ unsigned int icmd: 1;
+ unsigned char ima: 3; /* CB_DelRep, unmapped mode */
+ unsigned char reserved0: 4;
+ unsigned int xtype: 3;
+ unsigned int iaa0: 2;
+ unsigned int iaa1: 2;
+ unsigned char reserved1: 1;
+ unsigned char opc: 8; /* opcode */
+ unsigned char exopc: 8; /* extended opcode */
+ /* DW 0 - high */
+ unsigned int idef2: 22; /* TRi0 */
+ unsigned char reserved2: 2;
+ unsigned char istatus: 2;
+ unsigned char isubstatus:4;
+ unsigned char reserved3: 1;
+ unsigned char tlb_fault_color: 1;
+ /* DW 1 */
+ unsigned long idef4; /* 42 bits: TRi1, BufSize */
+ /* DW 2-6 */
+ unsigned long idef1; /* BAddr0 */
+ unsigned long idef5; /* Nelem */
+ unsigned long idef6; /* Stride, Operand1 */
+ unsigned long idef3; /* BAddr1, Value, Operand2 */
+ unsigned long reserved4;
+ /* DW 7 */
+ unsigned long avalue; /* AValue */
+};
+
+/*
+ * Generic instruction with friendlier names. This format is used
+ * for inline instructions.
+ */
+struct gru_instruction {
+ /* DW 0 */
+ union {
+ unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
+ struct {
+ unsigned int op32;
+ unsigned int tri0;
+ };
+ };
+ unsigned long tri1_bufsize; /* DW 1 */
+ unsigned long baddr0; /* DW 2 */
+ unsigned long nelem; /* DW 3 */
+ unsigned long op1_stride; /* DW 4 */
+ unsigned long op2_value_baddr1; /* DW 5 */
+ unsigned long reserved0; /* DW 6 */
+ unsigned long avalue; /* DW 7 */
+};
+
+/* Some shifts and masks for the low 64 bits of a GRU command */
+#define GRU_CB_ICMD_SHFT 0
+#define GRU_CB_ICMD_MASK 0x1
+#define GRU_CB_XTYPE_SHFT 8
+#define GRU_CB_XTYPE_MASK 0x7
+#define GRU_CB_IAA0_SHFT 11
+#define GRU_CB_IAA0_MASK 0x3
+#define GRU_CB_IAA1_SHFT 13
+#define GRU_CB_IAA1_MASK 0x3
+#define GRU_CB_IMA_SHFT 1
+#define GRU_CB_IMA_MASK 0x3
+#define GRU_CB_OPC_SHFT 16
+#define GRU_CB_OPC_MASK 0xff
+#define GRU_CB_EXOPC_SHFT 24
+#define GRU_CB_EXOPC_MASK 0xff
+#define GRU_IDEF2_SHFT 32
+#define GRU_IDEF2_MASK 0x3ffff
+#define GRU_ISTATUS_SHFT 56
+#define GRU_ISTATUS_MASK 0x3
+
+/* GRU instruction opcodes (opc field) */
+#define OP_NOP 0x00
+#define OP_BCOPY 0x01
+#define OP_VLOAD 0x02
+#define OP_IVLOAD 0x03
+#define OP_VSTORE 0x04
+#define OP_IVSTORE 0x05
+#define OP_VSET 0x06
+#define OP_IVSET 0x07
+#define OP_MESQ 0x08
+#define OP_GAMXR 0x09
+#define OP_GAMIR 0x0a
+#define OP_GAMIRR 0x0b
+#define OP_GAMER 0x0c
+#define OP_GAMERR 0x0d
+#define OP_BSTORE 0x0e
+#define OP_VFLUSH 0x0f
+
+
+/* Extended opcodes values (exopc field) */
+
+/* GAMIR - AMOs with implicit operands */
+#define EOP_IR_FETCH 0x01 /* Plain fetch of memory */
+#define EOP_IR_CLR 0x02 /* Fetch and clear */
+#define EOP_IR_INC 0x05 /* Fetch and increment */
+#define EOP_IR_DEC 0x07 /* Fetch and decrement */
+#define EOP_IR_QCHK1 0x0d /* Queue check, 64 byte msg */
+#define EOP_IR_QCHK2 0x0e /* Queue check, 128 byte msg */
+
+/* GAMIRR - Registered AMOs with implicit operands */
+#define EOP_IRR_FETCH 0x01 /* Registered fetch of memory */
+#define EOP_IRR_CLR 0x02 /* Registered fetch and clear */
+#define EOP_IRR_INC 0x05 /* Registered fetch and increment */
+#define EOP_IRR_DEC 0x07 /* Registered fetch and decrement */
+#define EOP_IRR_DECZ 0x0f /* Registered fetch and decrement, update on zero*/
+
+/* GAMER - AMOs with explicit operands */
+#define EOP_ER_SWAP 0x00 /* Exchange argument and memory */
+#define EOP_ER_OR 0x01 /* Logical OR with memory */
+#define EOP_ER_AND 0x02 /* Logical AND with memory */
+#define EOP_ER_XOR 0x03 /* Logical XOR with memory */
+#define EOP_ER_ADD 0x04 /* Add value to memory */
+#define EOP_ER_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/
+#define EOP_ER_CADD 0x0c /* Queue check, operand1*64 byte msg */
+
+/* GAMERR - Registered AMOs with explicit operands */
+#define EOP_ERR_SWAP 0x00 /* Exchange argument and memory */
+#define EOP_ERR_OR 0x01 /* Logical OR with memory */
+#define EOP_ERR_AND 0x02 /* Logical AND with memory */
+#define EOP_ERR_XOR 0x03 /* Logical XOR with memory */
+#define EOP_ERR_ADD 0x04 /* Add value to memory */
+#define EOP_ERR_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/
+#define EOP_ERR_EPOLL 0x09 /* Poll for equality */
+#define EOP_ERR_NPOLL 0x0a /* Poll for inequality */
+
+/* GAMXR - SGI Arithmetic unit */
+#define EOP_XR_CSWAP 0x0b /* Masked compare exchange */
+
+
+/* Transfer types (xtype field) */
+#define XTYPE_B 0x0 /* byte */
+#define XTYPE_S 0x1 /* short (2-byte) */
+#define XTYPE_W 0x2 /* word (4-byte) */
+#define XTYPE_DW 0x3 /* doubleword (8-byte) */
+#define XTYPE_CL 0x6 /* cacheline (64-byte) */
+
+
+/* Instruction access attributes (iaa0, iaa1 fields) */
+#define IAA_RAM 0x0 /* normal cached RAM access */
+#define IAA_NCRAM 0x2 /* noncoherent RAM access */
+#define IAA_MMIO 0x1 /* noncoherent memory-mapped I/O space */
+#define IAA_REGISTER 0x3 /* memory-mapped registers, etc. */
+
+
+/* Instruction mode attributes (ima field) */
+#define IMA_MAPPED 0x0 /* Virtual mode */
+#define IMA_CB_DELAY 0x1 /* hold read responses until status changes */
+#define IMA_UNMAPPED 0x2 /* bypass the TLBs (OS only) */
+#define IMA_INTERRUPT 0x4 /* Interrupt when instruction completes */
+
+/* CBE ecause bits */
+#define CBE_CAUSE_RI (1 << 0)
+#define CBE_CAUSE_INVALID_INSTRUCTION (1 << 1)
+#define CBE_CAUSE_UNMAPPED_MODE_FORBIDDEN (1 << 2)
+#define CBE_CAUSE_PE_CHECK_DATA_ERROR (1 << 3)
+#define CBE_CAUSE_IAA_GAA_MISMATCH (1 << 4)
+#define CBE_CAUSE_DATA_SEGMENT_LIMIT_EXCEPTION (1 << 5)
+#define CBE_CAUSE_OS_FATAL_TLB_FAULT (1 << 6)
+#define CBE_CAUSE_EXECUTION_HW_ERROR (1 << 7)
+#define CBE_CAUSE_TLBHW_ERROR (1 << 8)
+#define CBE_CAUSE_RA_REQUEST_TIMEOUT (1 << 9)
+#define CBE_CAUSE_HA_REQUEST_TIMEOUT (1 << 10)
+#define CBE_CAUSE_RA_RESPONSE_FATAL (1 << 11)
+#define CBE_CAUSE_RA_RESPONSE_NON_FATAL (1 << 12)
+#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13)
+#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14)
+#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15)
+#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
+#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
+#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
+#define CBE_CAUSE_FORCED_ERROR (1 << 19)
+
+/* CBE cbrexecstatus bits */
+#define CBR_EXS_ABORT_OCC_BIT 0
+#define CBR_EXS_INT_OCC_BIT 1
+#define CBR_EXS_PENDING_BIT 2
+#define CBR_EXS_QUEUED_BIT 3
+#define CBR_EXS_TLB_INVAL_BIT 4
+#define CBR_EXS_EXCEPTION_BIT 5
+#define CBR_EXS_CB_INT_PENDING_BIT 6
+
+#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
+#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
+#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
+#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
+#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
+#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
+#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
+
+/*
+ * Exceptions are retried for the following cases. If any OTHER bits are set
+ * in ecause, the exception is not retryable.
+ */
+#define EXCEPTION_RETRY_BITS (CBE_CAUSE_EXECUTION_HW_ERROR | \
+ CBE_CAUSE_TLBHW_ERROR | \
+ CBE_CAUSE_RA_REQUEST_TIMEOUT | \
+ CBE_CAUSE_RA_RESPONSE_NON_FATAL | \
+ CBE_CAUSE_HA_RESPONSE_NON_FATAL | \
+ CBE_CAUSE_RA_RESPONSE_DATA_ERROR | \
+ CBE_CAUSE_HA_RESPONSE_DATA_ERROR \
+ )
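+/*
+ * Editor's illustrative note: an exception is retryable exactly when
+ * ecause is non-zero and contains only retry bits, e.g.:
+ *
+ *	retryable = ecause && !(ecause & ~EXCEPTION_RETRY_BITS);
+ */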
+
+/* Message queue head structure */
+union gru_mesqhead {
+ unsigned long val;
+ struct {
+ unsigned int head;
+ unsigned int limit;
+ };
+};
+
+
+/* Generate the low word of a GRU instruction */
+static inline unsigned long
+__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
+ unsigned char iaa0, unsigned char iaa1,
+ unsigned long idef2, unsigned char ima)
+{
+ return (1 << GRU_CB_ICMD_SHFT) |
+ ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
+ (idef2<< GRU_IDEF2_SHFT) |
+ (iaa0 << GRU_CB_IAA0_SHFT) |
+ (iaa1 << GRU_CB_IAA1_SHFT) |
+ (ima << GRU_CB_IMA_SHFT) |
+ (xtype << GRU_CB_XTYPE_SHFT) |
+ (opcode << GRU_CB_OPC_SHFT) |
+ (exopc << GRU_CB_EXOPC_SHFT);
+}
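+/*
+ * Editor's illustrative note: the shift/mask pairs defined above invert
+ * __opdword(), e.g. to recover the opcode and status from a command word:
+ *
+ *	opc     = (op64 >> GRU_CB_OPC_SHFT) & GRU_CB_OPC_MASK;
+ *	istatus = (op64 >> GRU_ISTATUS_SHFT) & GRU_ISTATUS_MASK;
+ */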
+
+/*
+ * Architecture specific intrinsics
+ */
+static inline void gru_flush_cache(void *p)
+{
+ __flush_cache(p);
+}
+
+/*
+ * Store the lower 64 bits of the command including the "start" bit. Then
+ * start the instruction executing.
+ */
+static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
+{
+ gru_ordered_store_ulong(ins, op64);
+ mb();
+ gru_flush_cache(ins);
+}
+
+
+/* Convert "hints" to IMA */
+#define CB_IMA(h) ((h) | IMA_UNMAPPED)
+
+/* Convert data segment cache line index into TRI0 / TRI1 value */
+#define GRU_DINDEX(i) ((i) * GRU_CACHE_LINE_BYTES)
+
+/* Inline functions for GRU instructions.
+ * Note:
+ * - nelem and stride are in elements
+ * - tri0/tri1 are byte offsets from the beginning of the data segment.
+ */
+static inline void gru_vload_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
+static inline void gru_vstore_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
+static inline void gru_vload(void *cb, unsigned long mem_addr,
+ unsigned int tri0, unsigned char xtype, unsigned long nelem,
+ unsigned long stride, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->nelem = nelem;
+ ins->op1_stride = stride;
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
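+/*
+ * Editor's illustrative sketch: load 4 contiguous cache lines from user
+ * memory into data segment lines 0..3, then wait for completion. cb and
+ * buf are hypothetical; see gru_get_cb_pointer() below for obtaining cb.
+ *
+ *	gru_vload(cb, (unsigned long)buf, GRU_DINDEX(0), XTYPE_CL, 4, 1, 0);
+ *	if (gru_wait(cb) != CBS_IDLE)
+ *		; // handle exception (see gru_get_cb_exception_detail())
+ */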
+
+static inline void gru_vstore(void *cb, unsigned long mem_addr,
+ unsigned int tri0, unsigned char xtype, unsigned long nelem,
+ unsigned long stride, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->nelem = nelem;
+ ins->op1_stride = stride;
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
+}
+
+static inline void gru_ivload(void *cb, unsigned long mem_addr,
+ unsigned int tri0, unsigned int tri1, unsigned char xtype,
+ unsigned long nelem, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->nelem = nelem;
+ ins->tri1_bufsize = tri1;
+ gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
+}
+
+static inline void gru_ivstore(void *cb, unsigned long mem_addr,
+ unsigned int tri0, unsigned int tri1,
+ unsigned char xtype, unsigned long nelem, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->nelem = nelem;
+ ins->tri1_bufsize = tri1;
+ gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
+}
+
+static inline void gru_vset(void *cb, unsigned long mem_addr,
+ unsigned long value, unsigned char xtype, unsigned long nelem,
+ unsigned long stride, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->op2_value_baddr1 = value;
+ ins->nelem = nelem;
+ ins->op1_stride = stride;
+ gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_ivset(void *cb, unsigned long mem_addr,
+ unsigned int tri1, unsigned long value, unsigned char xtype,
+ unsigned long nelem, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->op2_value_baddr1 = value;
+ ins->nelem = nelem;
+ ins->tri1_bufsize = tri1;
+ gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_vflush(void *cb, unsigned long mem_addr,
+ unsigned long nelem, unsigned char xtype, unsigned long stride,
+ unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)mem_addr;
+ ins->op1_stride = stride;
+ ins->nelem = nelem;
+ gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_nop(void *cb, int hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
+}
+
+
+static inline void gru_bcopy(void *cb, const unsigned long src,
+ unsigned long dest,
+ unsigned int tri0, unsigned int xtype, unsigned long nelem,
+ unsigned int bufsize, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ ins->op2_value_baddr1 = (long)dest;
+ ins->nelem = nelem;
+ ins->tri1_bufsize = bufsize;
+ gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
+ IAA_RAM, tri0, CB_IMA(hints)));
+}
+
+static inline void gru_bstore(void *cb, const unsigned long src,
+ unsigned long dest, unsigned int tri0, unsigned int xtype,
+ unsigned long nelem, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ ins->op2_value_baddr1 = (long)dest;
+ ins->nelem = nelem;
+ gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
+ tri0, CB_IMA(hints)));
+}
+
+static inline void gru_gamir(void *cb, int exopc, unsigned long src,
+ unsigned int xtype, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
+ unsigned int xtype, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_gamer(void *cb, int exopc, unsigned long src,
+ unsigned int xtype,
+ unsigned long operand1, unsigned long operand2,
+ unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ ins->op1_stride = operand1;
+ ins->op2_value_baddr1 = operand2;
+ gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
+ unsigned int xtype, unsigned long operand1,
+ unsigned long operand2, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ ins->op1_stride = operand1;
+ ins->op2_value_baddr1 = operand2;
+ gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
+}
+
+static inline void gru_gamxr(void *cb, unsigned long src,
+ unsigned int tri0, unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)src;
+ ins->nelem = 4;
+ gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
+ IAA_RAM, 0, 0, CB_IMA(hints)));
+}
+
+static inline void gru_mesq(void *cb, unsigned long queue,
+ unsigned long tri0, unsigned long nelem,
+ unsigned long hints)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ ins->baddr0 = (long)queue;
+ ins->nelem = nelem;
+ gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
+}
+
+static inline unsigned long gru_get_amo_value(void *cb)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ return ins->avalue;
+}
+
+static inline int gru_get_amo_value_head(void *cb)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ return ins->avalue & 0xffffffff;
+}
+
+static inline int gru_get_amo_value_limit(void *cb)
+{
+ struct gru_instruction *ins = (void *)cb;
+
+ return ins->avalue >> 32;
+}
+
+static inline union gru_mesqhead gru_mesq_head(int head, int limit)
+{
+ union gru_mesqhead mqh;
+
+ mqh.head = head;
+ mqh.limit = limit;
+ return mqh;
+}
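+/*
+ * Editor's illustrative note (assumed layout; the real queue setup lives
+ * elsewhere in the driver): for a queue whose payload starts at cache
+ * line 2 and holds qlines lines, the head word could be built as:
+ *
+ *	union gru_mesqhead mqh = gru_mesq_head(2, qlines + 2);
+ */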
+
+/*
+ * Get struct control_block_extended_exc_detail for CB.
+ */
+extern int gru_get_cb_exception_detail(void *cb,
+ struct control_block_extended_exc_detail *excdet);
+
+#define GRU_EXC_STR_SIZE 256
+
+
+/*
+ * Control block definition for checking status
+ */
+struct gru_control_block_status {
+ unsigned int icmd :1;
+ unsigned int ima :3;
+ unsigned int reserved0 :4;
+ unsigned int unused1 :24;
+ unsigned int unused2 :24;
+ unsigned int istatus :2;
+ unsigned int isubstatus :4;
+ unsigned int unused3 :2;
+};
+
+/* Get CB status */
+static inline int gru_get_cb_status(void *cb)
+{
+ struct gru_control_block_status *cbs = (void *)cb;
+
+ return cbs->istatus;
+}
+
+/* Get CB message queue substatus */
+static inline int gru_get_cb_message_queue_substatus(void *cb)
+{
+ struct gru_control_block_status *cbs = (void *)cb;
+
+ return cbs->isubstatus & CBSS_MSG_QUEUE_MASK;
+}
+
+/* Get CB substatus */
+static inline int gru_get_cb_substatus(void *cb)
+{
+ struct gru_control_block_status *cbs = (void *)cb;
+
+ return cbs->isubstatus;
+}
+
+/*
+ * User interface to check an instruction status. UPM and exceptions
+ * are handled automatically. However, this function does NOT wait
+ * for an active instruction to complete.
+ *
+ */
+static inline int gru_check_status(void *cb)
+{
+ struct gru_control_block_status *cbs = (void *)cb;
+ int ret;
+
+ ret = cbs->istatus;
+ if (ret != CBS_ACTIVE)
+ ret = gru_check_status_proc(cb);
+ return ret;
+}
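+/*
+ * Editor's illustrative sketch of a user-side poll loop built on
+ * gru_check_status():
+ *
+ *	int status;
+ *
+ *	do {
+ *		status = gru_check_status(cb);
+ *	} while (status == CBS_ACTIVE);
+ *	// status is now CBS_IDLE or CBS_EXCEPTION
+ */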
+
+/*
+ * User interface (via inline function) to wait for an instruction
+ * to complete. Completion status (IDLE or EXCEPTION) is returned
+ * to the user. Exceptions due to hardware errors are automatically
+ * retried before returning an exception.
+ */
+static inline int gru_wait(void *cb)
+{
+ return gru_wait_proc(cb);
+}
+
+/*
+ * Wait for CB to complete. Aborts program on error. (Note: error does NOT
+ * mean TLB miss - only fatal errors such as memory parity error or user
+ * bugs will cause termination.)
+ */
+static inline void gru_wait_abort(void *cb)
+{
+ gru_wait_abort_proc(cb);
+}
+
+/*
+ * Get a pointer to the start of a gseg
+ * p - Any valid pointer within the gseg
+ */
+static inline void *gru_get_gseg_pointer (void *p)
+{
+ return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
+}
+
+/*
+ * Get a pointer to a control block
+ * gseg - GSeg address returned from gru_get_thread_gru_segment()
+ * index - index of desired CB
+ */
+static inline void *gru_get_cb_pointer(void *gseg,
+ int index)
+{
+ return gseg + GRU_CB_BASE + index * GRU_HANDLE_STRIDE;
+}
+
+/*
+ * Get a pointer to a cacheline in the data segment portion of a GSeg
+ * gseg - GSeg address returned from gru_get_thread_gru_segment()
+ * index - index of desired cache line
+ */
+static inline void *gru_get_data_pointer(void *gseg, int index)
+{
+ return gseg + GRU_DS_BASE + index * GRU_CACHE_LINE_BYTES;
+}
+
+/*
+ * Convert a vaddr into the tri index within the GSEG
+ * vaddr - virtual address of within gseg
+ */
+static inline int gru_get_tri(void *vaddr)
+{
+ return ((unsigned long)vaddr & (GRU_GSEG_PAGESIZE - 1)) - GRU_DS_BASE;
+}
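+/*
+ * Editor's illustrative note: gru_get_tri() inverts gru_get_data_pointer(),
+ * so for any cache line index i within the data segment:
+ *
+ *	gru_get_tri(gru_get_data_pointer(gseg, i)) == GRU_DINDEX(i)
+ */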
+#endif /* __GRU_INSTRUCTIONS_H__ */
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
new file mode 100644
index 00000000..c4acac74
--- /dev/null
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -0,0 +1,902 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * FAULT HANDLER FOR GRU DETECTED TLB MISSES
+ *
+ * This file contains code that handles TLB misses within the GRU.
+ * These misses are reported either via interrupts or user polling of
+ * the user CB.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/security.h>
+#include <linux/prefetch.h>
+#include <asm/pgtable.h>
+#include "gru.h"
+#include "grutables.h"
+#include "grulib.h"
+#include "gru_instructions.h"
+#include <asm/uv/uv_hub.h>
+
+/* Return codes for vtop functions */
+#define VTOP_SUCCESS 0
+#define VTOP_INVALID -1
+#define VTOP_RETRY -2
+
+
+/*
+ * Test if a physical address is a valid GRU GSEG address
+ */
+static inline int is_gru_paddr(unsigned long paddr)
+{
+ return paddr >= gru_start_paddr && paddr < gru_end_paddr;
+}
+
+/*
+ * Find the vma of a GRU segment. Caller must hold mmap_sem.
+ */
+struct vm_area_struct *gru_find_vma(unsigned long vaddr)
+{
+ struct vm_area_struct *vma;
+
+ vma = find_vma(current->mm, vaddr);
+ if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
+ return vma;
+ return NULL;
+}
+
+/*
+ * Find and lock the gts that contains the specified user vaddr.
+ *
+ * Returns:
+ * - *gts with the mmap_sem locked for read and the GTS locked.
+ * - NULL if vaddr invalid OR is not a valid GSEG vaddr.
+ */
+
+static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct gru_thread_state *gts = NULL;
+
+ down_read(&mm->mmap_sem);
+ vma = gru_find_vma(vaddr);
+ if (vma)
+ gts = gru_find_thread_state(vma, TSID(vaddr, vma));
+ if (gts)
+ mutex_lock(&gts->ts_ctxlock);
+ else
+ up_read(&mm->mmap_sem);
+ return gts;
+}
+
+static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct gru_thread_state *gts = ERR_PTR(-EINVAL);
+
+ down_write(&mm->mmap_sem);
+ vma = gru_find_vma(vaddr);
+ if (!vma)
+ goto err;
+
+ gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
+ if (IS_ERR(gts))
+ goto err;
+ mutex_lock(&gts->ts_ctxlock);
+ downgrade_write(&mm->mmap_sem);
+ return gts;
+
+err:
+ up_write(&mm->mmap_sem);
+ return gts;
+}
+
+/*
+ * Unlock a GTS that was previously locked with gru_find_lock_gts().
+ */
+static void gru_unlock_gts(struct gru_thread_state *gts)
+{
+ mutex_unlock(&gts->ts_ctxlock);
+ up_read(&current->mm->mmap_sem);
+}
+
+/*
+ * Set a CB.istatus to active using a user virtual address. This must be done
+ * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
+ * If the line is evicted, the status may be lost. The in-cache update
+ * is necessary to prevent the user from seeing a stale cb.istatus that will
+ * change as soon as the TFH restart is complete. Races may cause an
+ * occasional failure to clear the cb.istatus, but that is ok.
+ */
+static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
+{
+ if (cbk) {
+ cbk->istatus = CBS_ACTIVE;
+ }
+}
+
+/*
+ * Read & clear a TFM
+ *
+ * The GRU has an array of fault maps. A map is private to a cpu;
+ * only one cpu will be accessing a cpu's fault map.
+ *
+ * This function scans the cpu-private fault map & clears all bits that
+ * are set. The function returns a bitmap that indicates the bits that
+ * were cleared. Note that since the maps may be updated asynchronously by
+ * the GRU, atomic operations must be used to clear bits.
+ */
+static void get_clear_fault_map(struct gru_state *gru,
+ struct gru_tlb_fault_map *imap,
+ struct gru_tlb_fault_map *dmap)
+{
+ unsigned long i, k;
+ struct gru_tlb_fault_map *tfm;
+
+ tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
+ prefetchw(tfm); /* Helps on hardware, required for emulator */
+ for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
+ k = tfm->fault_bits[i];
+ if (k)
+ k = xchg(&tfm->fault_bits[i], 0UL);
+ imap->fault_bits[i] = k;
+ k = tfm->done_bits[i];
+ if (k)
+ k = xchg(&tfm->done_bits[i], 0UL);
+ dmap->fault_bits[i] = k;
+ }
+
+ /*
+ * Not functionally required but helps performance. (Required
+ * on emulator)
+ */
+ gru_flush_cache(tfm);
+}
+
+/*
+ * Atomic (interrupt context) & non-atomic (user context) functions to
+ * convert a vaddr into a physical address. The size of the page
+ * is returned in pageshift.
+ * returns:
+ * 0 - successful
+ * < 0 - error code
+ * 1 - (atomic only) try again in non-atomic context
+ */
+static int non_atomic_pte_lookup(struct vm_area_struct *vma,
+ unsigned long vaddr, int write,
+ unsigned long *paddr, int *pageshift)
+{
+ struct page *page;
+
+#ifdef CONFIG_HUGETLB_PAGE
+ *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+ *pageshift = PAGE_SHIFT;
+#endif
+ if (get_user_pages
+ (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+ return -EFAULT;
+ *paddr = page_to_phys(page);
+ put_page(page);
+ return 0;
+}
+
+/*
+ * atomic_pte_lookup
+ *
+ * Convert a user virtual address to a physical address
+ * Only supports Intel large pages (2MB only) on x86_64.
+ * ZZZ - hugepage support is incomplete
+ *
+ * NOTE: mmap_sem is already held on entry to this function. This
+ * guarantees existence of the page tables.
+ */
+static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
+ int write, unsigned long *paddr, int *pageshift)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pud_t *pudp;
+ pte_t pte;
+
+ pgdp = pgd_offset(vma->vm_mm, vaddr);
+ if (unlikely(pgd_none(*pgdp)))
+ goto err;
+
+ pudp = pud_offset(pgdp, vaddr);
+ if (unlikely(pud_none(*pudp)))
+ goto err;
+
+ pmdp = pmd_offset(pudp, vaddr);
+ if (unlikely(pmd_none(*pmdp)))
+ goto err;
+#ifdef CONFIG_X86_64
+ if (unlikely(pmd_large(*pmdp)))
+ pte = *(pte_t *) pmdp;
+ else
+#endif
+ pte = *pte_offset_kernel(pmdp, vaddr);
+
+ if (unlikely(!pte_present(pte) ||
+ (write && (!pte_write(pte) || !pte_dirty(pte)))))
+ return 1;
+
+ *paddr = pte_pfn(pte) << PAGE_SHIFT;
+#ifdef CONFIG_HUGETLB_PAGE
+ *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+ *pageshift = PAGE_SHIFT;
+#endif
+ return 0;
+
+err:
+ return 1;
+}
+
+static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
+ int write, int atomic, unsigned long *gpa, int *pageshift)
+{
+ struct mm_struct *mm = gts->ts_mm;
+ struct vm_area_struct *vma;
+ unsigned long paddr;
+ int ret, ps;
+
+ vma = find_vma(mm, vaddr);
+ if (!vma)
+ goto inval;
+
+ /*
+ * Atomic lookup is faster & usually works even if called in non-atomic
+ * context.
+ */
+ rmb(); /* Must check ms_range_active before loading PTEs */
+ ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+ if (ret) {
+ if (atomic)
+ goto upm;
+ if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+ goto inval;
+ }
+ if (is_gru_paddr(paddr))
+ goto inval;
+ paddr = paddr & ~((1UL << ps) - 1);
+ *gpa = uv_soc_phys_ram_to_gpa(paddr);
+ *pageshift = ps;
+ return VTOP_SUCCESS;
+
+inval:
+ return VTOP_INVALID;
+upm:
+ return VTOP_RETRY;
+}
+
+
+/*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to the home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+ if (unlikely(cbe)) {
+ cbe->cbrexecstatus = 0; /* make CL dirty */
+ gru_flush_cache(cbe);
+ }
+}
+
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
+ * the end of the bcopy transfer, whichever is smaller.
+ */
+static void gru_preload_tlb(struct gru_state *gru,
+ struct gru_thread_state *gts, int atomic,
+ unsigned long fault_vaddr, int asid, int write,
+ unsigned char tlb_preload_count,
+ struct gru_tlb_fault_handle *tfh,
+ struct gru_control_block_extended *cbe)
+{
+ unsigned long vaddr = 0, gpa;
+ int ret, pageshift;
+
+ if (cbe->opccpy != OP_BCOPY)
+ return;
+
+ if (fault_vaddr == cbe->cbe_baddr0)
+ vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
+ else if (fault_vaddr == cbe->cbe_baddr1)
+ vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
+
+ fault_vaddr &= PAGE_MASK;
+ vaddr &= PAGE_MASK;
+ vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
+
+ while (vaddr > fault_vaddr) {
+ ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+ if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
+ GRU_PAGESIZE(pageshift)))
+ return;
+ gru_dbg(grudev,
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
+ vaddr, asid, write, pageshift, gpa);
+ vaddr -= PAGE_SIZE;
+ STAT(tlb_preload_page);
+ }
+}
+
+/*
+ * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
+ * Input:
+ * cb Address of user CBR. Null if not running in user context
+ * Return:
+ * 0 = dropin, exception, or switch to UPM successful
+ * 1 = range invalidate active
+ * < 0 = error code
+ *
+ */
+static int gru_try_dropin(struct gru_state *gru,
+ struct gru_thread_state *gts,
+ struct gru_tlb_fault_handle *tfh,
+ struct gru_instruction_bits *cbk)
+{
+ struct gru_control_block_extended *cbe = NULL;
+ unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
+ int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
+ unsigned long gpa = 0, vaddr = 0;
+
+ /*
+ * NOTE: The GRU contains magic hardware that eliminates races between
+ * TLB invalidates and TLB dropins. If an invalidate occurs
+ * in the window between reading the TFH and the subsequent TLB dropin,
+ * the dropin is ignored. This eliminates the need for additional locks.
+ */
+
+ /*
+ * Prefetch the CBE if doing TLB preloading
+ */
+ if (unlikely(tlb_preload_count)) {
+ cbe = gru_tfh_to_cbe(tfh);
+ prefetchw(cbe);
+ }
+
+ /*
+ * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM call.
+ * Might be a hardware race OR a stupid user. Ignore FMM because FMM
+ * is a transient state.
+ */
+ if (tfh->status != TFHSTATUS_EXCEPTION) {
+ gru_flush_cache(tfh);
+ sync_core();
+ if (tfh->status != TFHSTATUS_EXCEPTION)
+ goto failnoexception;
+ STAT(tfh_stale_on_fault);
+ }
+ if (tfh->state == TFHSTATE_IDLE)
+ goto failidle;
+ if (tfh->state == TFHSTATE_MISS_FMM && cbk)
+ goto failfmm;
+
+ write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
+ vaddr = tfh->missvaddr;
+ asid = tfh->missasid;
+ indexway = tfh->indexway;
+ if (asid == 0)
+ goto failnoasid;
+
+ rmb(); /* TFH must be cache resident before reading ms_range_active */
+
+ /*
+ * TFH is cache resident - at least briefly. Fail the dropin
+ * if a range invalidate is active.
+ */
+ if (atomic_read(&gts->ts_gms->ms_range_active))
+ goto failactive;
+
+ ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+ if (ret == VTOP_INVALID)
+ goto failinval;
+ if (ret == VTOP_RETRY)
+ goto failupm;
+
+ if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+ gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+ if (atomic || !gru_update_cch(gts)) {
+ gts->ts_force_cch_reload = 1;
+ goto failupm;
+ }
+ }
+
+ if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+ gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+ gru_flush_cache_cbe(cbe);
+ }
+
+ gru_cb_set_istatus_active(cbk);
+ gts->ustats.tlbdropin++;
+ tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
+ GRU_PAGESIZE(pageshift));
+ gru_dbg(grudev,
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
+ " rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
+ indexway, write, pageshift, gpa);
+ STAT(tlb_dropin);
+ return 0;
+
+failnoasid:
+ /* No asid (delayed unload). */
+ STAT(tlb_dropin_fail_no_asid);
+ gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
+ if (!cbk)
+ tfh_user_polling_mode(tfh);
+ else
+ gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
+ return -EAGAIN;
+
+failupm:
+ /* Atomic failure switch CBR to UPM */
+ tfh_user_polling_mode(tfh);
+ gru_flush_cache_cbe(cbe);
+ STAT(tlb_dropin_fail_upm);
+ gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
+ return 1;
+
+failfmm:
+ /* FMM state on UPM call */
+ gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
+ STAT(tlb_dropin_fail_fmm);
+ gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
+ return 0;
+
+failnoexception:
+ /* TFH status did not show exception pending */
+ gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
+ STAT(tlb_dropin_fail_no_exception);
+ gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
+ tfh, tfh->status, tfh->state);
+ return 0;
+
+failidle:
+ /* TFH state was idle - no miss pending */
+ gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
+ STAT(tlb_dropin_fail_idle);
+ gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
+ return 0;
+
+failinval:
+ /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
+ tfh_exception(tfh);
+ gru_flush_cache_cbe(cbe);
+ STAT(tlb_dropin_fail_invalid);
+ gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
+ return -EFAULT;
+
+failactive:
+ /* Range invalidate active. Switch to UPM iff atomic */
+ if (!cbk)
+ tfh_user_polling_mode(tfh);
+ else
+ gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
+ STAT(tlb_dropin_fail_range_active);
+ gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
+ tfh, vaddr);
+ return 1;
+}
+
+/*
+ * Process an external interrupt from the GRU. This interrupt is
+ * caused by a TLB miss.
+ * Note that this is the interrupt handler that is registered with the
+ * linux interrupt subsystem.
+ */
+static irqreturn_t gru_intr(int chiplet, int blade)
+{
+ struct gru_state *gru;
+ struct gru_tlb_fault_map imap, dmap;
+ struct gru_thread_state *gts;
+ struct gru_tlb_fault_handle *tfh = NULL;
+ struct completion *cmp;
+ int cbrnum, ctxnum;
+
+ STAT(intr);
+
+ gru = &gru_base[blade]->bs_grus[chiplet];
+ if (!gru) {
+ dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
+ raw_smp_processor_id(), chiplet);
+ return IRQ_NONE;
+ }
+ get_clear_fault_map(gru, &imap, &dmap);
+ gru_dbg(grudev,
+ "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+ smp_processor_id(), chiplet, gru->gs_gid,
+ imap.fault_bits[0], imap.fault_bits[1],
+ dmap.fault_bits[0], dmap.fault_bits[1]);
+
+ for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
+ STAT(intr_cbr);
+ cmp = gru->gs_blade->bs_async_wq;
+ if (cmp)
+ complete(cmp);
+ gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
+ gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
+ }
+
+ for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
+ STAT(intr_tfh);
+ tfh = get_tfh_by_index(gru, cbrnum);
+ prefetchw(tfh); /* Helps on hdw, required for emulator */
+
+ /*
+ * When hardware sets a bit in the faultmap, it implicitly
+ * locks the GRU context so that it cannot be unloaded.
+ * The gts cannot change until a TFH start/writestart command
+ * is issued.
+ */
+ ctxnum = tfh->ctxnum;
+ gts = gru->gs_gts[ctxnum];
+
+ /* Spurious interrupts can cause this. Ignore. */
+ if (!gts) {
+ STAT(intr_spurious);
+ continue;
+ }
+
+ /*
+ * This is running in interrupt context. Trylock the mmap_sem.
+ * If it fails, retry the fault in user context.
+ */
+ gts->ustats.fmm_tlbmiss++;
+ if (!gts->ts_force_cch_reload &&
+ down_read_trylock(&gts->ts_mm->mmap_sem)) {
+ gru_try_dropin(gru, gts, tfh, NULL);
+ up_read(&gts->ts_mm->mmap_sem);
+ } else {
+ tfh_user_polling_mode(tfh);
+ STAT(intr_mm_lock_failed);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t gru0_intr(int irq, void *dev_id)
+{
+ return gru_intr(0, uv_numa_blade_id());
+}
+
+irqreturn_t gru1_intr(int irq, void *dev_id)
+{
+ return gru_intr(1, uv_numa_blade_id());
+}
+
+irqreturn_t gru_intr_mblade(int irq, void *dev_id)
+{
+ int blade;
+
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_intr(0, blade);
+ gru_intr(1, blade);
+ }
+ return IRQ_HANDLED;
+}
+
+
+static int gru_user_dropin(struct gru_thread_state *gts,
+ struct gru_tlb_fault_handle *tfh,
+ void *cb)
+{
+ struct gru_mm_struct *gms = gts->ts_gms;
+ int ret;
+
+ gts->ustats.upm_tlbmiss++;
+ while (1) {
+ wait_event(gms->ms_wait_queue,
+ atomic_read(&gms->ms_range_active) == 0);
+ prefetchw(tfh); /* Helps on hdw, required for emulator */
+ ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
+ if (ret <= 0)
+ return ret;
+ STAT(call_os_wait_queue);
+ }
+}
+
+/*
+ * This interface is called as a result of a user detecting a "call OS" bit
+ * in a user CB. Normally means that a TLB fault has occurred.
+ * cb - user virtual address of the CB
+ */
+int gru_handle_user_call_os(unsigned long cb)
+{
+ struct gru_tlb_fault_handle *tfh;
+ struct gru_thread_state *gts;
+ void *cbk;
+ int ucbnum, cbrnum, ret = -EINVAL;
+
+ STAT(call_os);
+
+ /* sanity check the cb pointer */
+ ucbnum = get_cb_number((void *)cb);
+ if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
+ return -EINVAL;
+
+ gts = gru_find_lock_gts(cb);
+ if (!gts)
+ return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
+
+ if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
+ goto exit;
+
+ gru_check_context_placement(gts);
+
+ /*
+ * CCH may contain stale data if ts_force_cch_reload is set.
+ */
+ if (gts->ts_gru && gts->ts_force_cch_reload) {
+ gts->ts_force_cch_reload = 0;
+ gru_update_cch(gts);
+ }
+
+ ret = -EAGAIN;
+ cbrnum = thread_cbr_number(gts, ucbnum);
+ if (gts->ts_gru) {
+ tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
+ cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
+ gts->ts_ctxnum, ucbnum);
+ ret = gru_user_dropin(gts, tfh, cbk);
+ }
+exit:
+ gru_unlock_gts(gts);
+ return ret;
+}
+
+/*
+ * Fetch the exception detail information for a CB that terminated with
+ * an exception.
+ */
+int gru_get_exception_detail(unsigned long arg)
+{
+ struct control_block_extended_exc_detail excdet;
+ struct gru_control_block_extended *cbe;
+ struct gru_thread_state *gts;
+ int ucbnum, cbrnum, ret;
+
+ STAT(user_exception);
+ if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
+ return -EFAULT;
+
+ gts = gru_find_lock_gts(excdet.cb);
+ if (!gts)
+ return -EINVAL;
+
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
+ ucbnum = get_cb_number((void *)excdet.cb);
+ if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
+ ret = -EINVAL;
+ } else if (gts->ts_gru) {
+ cbrnum = thread_cbr_number(gts, ucbnum);
+ cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
+ gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core(); /* make sure we have current data */
+ excdet.opc = cbe->opccpy;
+ excdet.exopc = cbe->exopccpy;
+ excdet.ecause = cbe->ecause;
+ excdet.exceptdet0 = cbe->idef1upd;
+ excdet.exceptdet1 = cbe->idef3upd;
+ excdet.cbrstate = cbe->cbrstate;
+ excdet.cbrexecstatus = cbe->cbrexecstatus;
+ gru_flush_cache_cbe(cbe);
+ ret = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+ gru_unlock_gts(gts);
+
+ gru_dbg(grudev,
+ "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
+ "exdet0 0x%lx, exdet1 0x%x\n",
+ excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
+ excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
+ if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
+ ret = -EFAULT;
+ return ret;
+}
+
+/*
+ * User request to unload a context. Content is saved for possible reload.
+ */
+static int gru_unload_all_contexts(void)
+{
+ struct gru_thread_state *gts;
+ struct gru_state *gru;
+ int gid, ctxnum;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ foreach_gid(gid) {
+ gru = GID_TO_GRU(gid);
+ spin_lock(&gru->gs_lock);
+ for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ gts = gru->gs_gts[ctxnum];
+ if (gts && mutex_trylock(&gts->ts_ctxlock)) {
+ spin_unlock(&gru->gs_lock);
+ gru_unload_context(gts, 1);
+ mutex_unlock(&gts->ts_ctxlock);
+ spin_lock(&gru->gs_lock);
+ }
+ }
+ spin_unlock(&gru->gs_lock);
+ }
+ return 0;
+}
+
+int gru_user_unload_context(unsigned long arg)
+{
+ struct gru_thread_state *gts;
+ struct gru_unload_context_req req;
+
+ STAT(user_unload_context);
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
+
+ if (!req.gseg)
+ return gru_unload_all_contexts();
+
+ gts = gru_find_lock_gts(req.gseg);
+ if (!gts)
+ return -EINVAL;
+
+ if (gts->ts_gru)
+ gru_unload_context(gts, 1);
+ gru_unlock_gts(gts);
+
+ return 0;
+}
+
+/*
+ * User request to flush a range of virtual addresses from the GRU TLB
+ * (Mainly for testing).
+ */
+int gru_user_flush_tlb(unsigned long arg)
+{
+ struct gru_thread_state *gts;
+ struct gru_flush_tlb_req req;
+ struct gru_mm_struct *gms;
+
+ STAT(user_flush_tlb);
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
+ req.vaddr, req.len);
+
+ gts = gru_find_lock_gts(req.gseg);
+ if (!gts)
+ return -EINVAL;
+
+ gms = gts->ts_gms;
+ gru_unlock_gts(gts);
+ gru_flush_tlb_range(gms, req.vaddr, req.len);
+
+ return 0;
+}
+
+/*
+ * Fetch GSEG statistics
+ */
+long gru_get_gseg_statistics(unsigned long arg)
+{
+ struct gru_thread_state *gts;
+ struct gru_get_gseg_statistics_req req;
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ /*
+ * The library creates arrays of contexts for threaded programs.
+ * If no gts exists in the array, the context has never been used & all
+ * statistics are implicitly 0.
+ */
+ gts = gru_find_lock_gts(req.gseg);
+ if (gts) {
+ memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
+ gru_unlock_gts(gts);
+ } else {
+ memset(&req.stats, 0, sizeof(gts->ustats));
+ }
+
+ if (copy_to_user((void __user *)arg, &req, sizeof(req)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Register the current task as the user of the GSEG slice.
+ * Needed for TLB fault interrupt targeting.
+ */
+int gru_set_context_option(unsigned long arg)
+{
+ struct gru_thread_state *gts;
+ struct gru_set_context_option_req req;
+ int ret = 0;
+
+ STAT(set_context_option);
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+ gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
+
+ gts = gru_find_lock_gts(req.gseg);
+ if (!gts) {
+ gts = gru_alloc_locked_gts(req.gseg);
+ if (IS_ERR(gts))
+ return PTR_ERR(gts);
+ }
+
+ switch (req.op) {
+ case sco_blade_chiplet:
+ /* Select blade/chiplet for GRU context */
+ if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] ||
+ req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
+ ret = -EINVAL;
+ } else {
+ gts->ts_user_blade_id = req.val1;
+ gts->ts_user_chiplet_id = req.val0;
+ gru_check_context_placement(gts);
+ }
+ break;
+ case sco_gseg_owner:
+ /* Register the current task as the GSEG owner */
+ gts->ts_tgid_owner = current->tgid;
+ break;
+ case sco_cch_req_slice:
+ /* Set the CCH slice option */
+ gts->ts_cch_req_slice = req.val1 & 3;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ gru_unlock_gts(gts);
+
+ return ret;
+}
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
new file mode 100644
index 00000000..ecafa4ba
--- /dev/null
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -0,0 +1,618 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * FILE OPERATIONS & DRIVER INITIALIZATION
+ *
+ * This file supports the user system call for file open, close, mmap, etc.
+ * This also includes the driver initialization code.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_X86_64
+#include <asm/uv/uv_irq.h>
+#endif
+#include <asm/uv/uv.h>
+#include "gru.h"
+#include "grulib.h"
+#include "grutables.h"
+
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_mmrs.h>
+
+struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
+unsigned long gru_start_paddr __read_mostly;
+void *gru_start_vaddr __read_mostly;
+unsigned long gru_end_paddr __read_mostly;
+unsigned int gru_max_gids __read_mostly;
+struct gru_stats_s gru_stats;
+
+/* Guaranteed user available resources on each node */
+static int max_user_cbrs, max_user_dsr_bytes;
+
+static struct miscdevice gru_miscdev;
+
+
+/*
+ * gru_vma_close
+ *
+ * Called when unmapping a device mapping. Frees all gru resources
+ * and tables belonging to the vma.
+ */
+static void gru_vma_close(struct vm_area_struct *vma)
+{
+ struct gru_vma_data *vdata;
+ struct gru_thread_state *gts;
+ struct list_head *entry, *next;
+
+ if (!vma->vm_private_data)
+ return;
+
+ vdata = vma->vm_private_data;
+ vma->vm_private_data = NULL;
+ gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
+ vdata);
+ list_for_each_safe(entry, next, &vdata->vd_head) {
+		gts = list_entry(entry, struct gru_thread_state, ts_next);
+ list_del(&gts->ts_next);
+ mutex_lock(&gts->ts_ctxlock);
+ if (gts->ts_gru)
+ gru_unload_context(gts, 0);
+ mutex_unlock(&gts->ts_ctxlock);
+ gts_drop(gts);
+ }
+ kfree(vdata);
+ STAT(vdata_free);
+}
+
+/*
+ * gru_file_mmap
+ *
+ * Called when mmapping the device. Initializes the vma with a fault handler
+ * and private data structure necessary to allocate, track, and free the
+ * underlying pages.
+ */
+static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
+ return -EPERM;
+
+ if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
+ vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
+ return -EINVAL;
+
+ vma->vm_flags |=
+ (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
+ VM_RESERVED);
+ vma->vm_page_prot = PAGE_SHARED;
+ vma->vm_ops = &gru_vm_ops;
+
+ vma->vm_private_data = gru_alloc_vma_data(vma, 0);
+ if (!vma->vm_private_data)
+ return -ENOMEM;
+
+ gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
+ file, vma->vm_start, vma, vma->vm_private_data);
+ return 0;
+}
+
+/*
+ * Create a new GRU context
+ */
+static int gru_create_new_context(unsigned long arg)
+{
+ struct gru_create_context_req req;
+ struct vm_area_struct *vma;
+ struct gru_vma_data *vdata;
+ int ret = -EINVAL;
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ if (req.data_segment_bytes > max_user_dsr_bytes)
+ return -EINVAL;
+ if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
+ return -EINVAL;
+
+ if (!(req.options & GRU_OPT_MISS_MASK))
+ req.options |= GRU_OPT_MISS_FMM_INTR;
+
+ down_write(&current->mm->mmap_sem);
+ vma = gru_find_vma(req.gseg);
+ if (vma) {
+ vdata = vma->vm_private_data;
+ vdata->vd_user_options = req.options;
+ vdata->vd_dsr_au_count =
+ GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
+ vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+ vdata->vd_tlb_preload_count = req.tlb_preload_count;
+ ret = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+/*
+ * Get GRU configuration info (temp - for emulator testing)
+ */
+static long gru_get_config_info(unsigned long arg)
+{
+ struct gru_config_info info;
+ int nodesperblade;
+
+ if (num_online_nodes() > 1 &&
+ (uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
+ nodesperblade = 2;
+ else
+ nodesperblade = 1;
+ info.cpus = num_online_cpus();
+ info.nodes = num_online_nodes();
+ info.blades = info.nodes / nodesperblade;
+ info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * gru_file_unlocked_ioctl
+ *
+ * Called to update file attributes via IOCTL calls.
+ */
+static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
+ unsigned long arg)
+{
+ int err = -EBADRQC;
+
+ gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
+
+ switch (req) {
+ case GRU_CREATE_CONTEXT:
+ err = gru_create_new_context(arg);
+ break;
+ case GRU_SET_CONTEXT_OPTION:
+ err = gru_set_context_option(arg);
+ break;
+ case GRU_USER_GET_EXCEPTION_DETAIL:
+ err = gru_get_exception_detail(arg);
+ break;
+ case GRU_USER_UNLOAD_CONTEXT:
+ err = gru_user_unload_context(arg);
+ break;
+ case GRU_USER_FLUSH_TLB:
+ err = gru_user_flush_tlb(arg);
+ break;
+ case GRU_USER_CALL_OS:
+ err = gru_handle_user_call_os(arg);
+ break;
+ case GRU_GET_GSEG_STATISTICS:
+ err = gru_get_gseg_statistics(arg);
+ break;
+ case GRU_KTEST:
+ err = gru_ktest(arg);
+ break;
+ case GRU_GET_CONFIG_INFO:
+ err = gru_get_config_info(arg);
+ break;
+ case GRU_DUMP_CHIPLET_STATE:
+ err = gru_dump_chiplet_request(arg);
+ break;
+ }
+ return err;
+}
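+
+/*
+ * Illustrative user-space sketch (editor's addition, not part of the
+ * driver; gseg_vaddr is a hypothetical placeholder). The request layout
+ * and ioctl numbers come from grulib.h:
+ *
+ *	int fd = open("/dev/gru", O_RDWR);
+ *	struct gru_get_gseg_statistics_req sreq = { .gseg = gseg_vaddr };
+ *
+ *	if (fd >= 0 && ioctl(fd, GRU_GET_GSEG_STATISTICS, &sreq) == 0)
+ *		... sreq.stats holds the counters for that GSEG ...
+ */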
+
+/*
+ * Called at init time to build tables for all GRUs that are present in the
+ * system.
+ */
+static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
+ void *vaddr, int blade_id, int chiplet_id)
+{
+ spin_lock_init(&gru->gs_lock);
+ spin_lock_init(&gru->gs_asid_lock);
+ gru->gs_gru_base_paddr = paddr;
+ gru->gs_gru_base_vaddr = vaddr;
+ gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
+ gru->gs_blade = gru_base[blade_id];
+ gru->gs_blade_id = blade_id;
+ gru->gs_chiplet_id = chiplet_id;
+ gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
+ gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
+ gru->gs_asid_limit = MAX_ASID;
+ gru_tgh_flush_init(gru);
+ if (gru->gs_gid >= gru_max_gids)
+ gru_max_gids = gru->gs_gid + 1;
+ gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
+ blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
+ gru->gs_gru_base_paddr);
+}
+
+static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
+{
+ int pnode, nid, bid, chip;
+ int cbrs, dsrbytes, n;
+ int order = get_order(sizeof(struct gru_blade_state));
+ struct page *page;
+ struct gru_state *gru;
+ unsigned long paddr;
+ void *vaddr;
+
+ max_user_cbrs = GRU_NUM_CB;
+ max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
+ for_each_possible_blade(bid) {
+ pnode = uv_blade_to_pnode(bid);
+ nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
+ page = alloc_pages_node(nid, GFP_KERNEL, order);
+ if (!page)
+ goto fail;
+ gru_base[bid] = page_address(page);
+ memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
+ gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
+ spin_lock_init(&gru_base[bid]->bs_lock);
+ init_rwsem(&gru_base[bid]->bs_kgts_sema);
+
+ dsrbytes = 0;
+ cbrs = 0;
+ for (gru = gru_base[bid]->bs_grus, chip = 0;
+ chip < GRU_CHIPLETS_PER_BLADE;
+ chip++, gru++) {
+ paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
+ vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
+ gru_init_chiplet(gru, paddr, vaddr, bid, chip);
+ n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
+ cbrs = max(cbrs, n);
+ n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
+ dsrbytes = max(dsrbytes, n);
+ }
+ max_user_cbrs = min(max_user_cbrs, cbrs);
+ max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
+ }
+
+ return 0;
+
+fail:
+ for (bid--; bid >= 0; bid--)
+ free_pages((unsigned long)gru_base[bid], order);
+ return -ENOMEM;
+}
+
+static void gru_free_tables(void)
+{
+ int bid;
+ int order = get_order(sizeof(struct gru_state) *
+ GRU_CHIPLETS_PER_BLADE);
+
+ for (bid = 0; bid < GRU_MAX_BLADES; bid++)
+ free_pages((unsigned long)gru_base[bid], order);
+}
+
+static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
+{
+ unsigned long mmr = 0;
+ int core;
+
+ /*
+ * We target the cores of a blade and not the hyperthreads themselves.
+ * There is a max of 8 cores per socket and 2 sockets per blade,
+ * making for a max total of 16 cores (i.e., 16 CPUs without
+ * hyperthreading and 32 CPUs with hyperthreading).
+ */
+ core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
+ return 0;
+
+ if (chiplet == 0) {
+ mmr = UVH_GR0_TLB_INT0_CONFIG +
+ core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
+ } else if (chiplet == 1) {
+ mmr = UVH_GR1_TLB_INT0_CONFIG +
+ core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
+ } else {
+ BUG();
+ }
+
+ *corep = core;
+ return mmr;
+}
+
+#ifdef CONFIG_IA64
+
+static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
+
+static void gru_noop(struct irq_data *d)
+{
+}
+
+static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
+	[0 ... GRU_CHIPLETS_PER_BLADE - 1] = {
+ .irq_mask = gru_noop,
+ .irq_unmask = gru_noop,
+ .irq_ack = gru_noop
+ }
+};
+
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq = IRQ_GRU + chiplet;
+ int ret, core;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ if (gru_irq_count[chiplet] == 0) {
+ gru_chip[chiplet].name = irq_name;
+ ret = irq_set_chip(irq, &gru_chip[chiplet]);
+ if (ret) {
+			printk(KERN_ERR "%s: irq_set_chip failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ }
+ gru_irq_count[chiplet]++;
+
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
+{
+ unsigned long mmr;
+ int core, irq = IRQ_GRU + chiplet;
+
+ if (gru_irq_count[chiplet] == 0)
+ return;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return;
+
+ if (--gru_irq_count[chiplet] == 0)
+ free_irq(irq, NULL);
+}
+
+#elif defined CONFIG_X86_64
+
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq, core;
+ int ret;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
+ if (irq < 0) {
+ printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -irq);
+ return irq;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ uv_teardown_irq(irq);
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
+{
+ int irq, core;
+ unsigned long mmr;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr) {
+ irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
+ if (irq) {
+ free_irq(irq, NULL);
+ uv_teardown_irq(irq);
+ }
+ }
+}
+
+#endif
+
+static void gru_teardown_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ gru_chiplet_teardown_tlb_irq(0, cpu, blade);
+ gru_chiplet_teardown_tlb_irq(1, cpu, blade);
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_chiplet_teardown_tlb_irq(0, 0, blade);
+ gru_chiplet_teardown_tlb_irq(1, 0, blade);
+ }
+}
+
+static int gru_setup_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+ int ret;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+
+ return 0;
+
+exit1:
+ gru_teardown_tlb_irqs();
+ return ret;
+}
+
+/*
+ * gru_init
+ *
+ * Called at boot or module load time to initialize the GRUs.
+ */
+static int __init gru_init(void)
+{
+ int ret;
+
+ if (!is_uv_system())
+ return 0;
+
+#if defined CONFIG_IA64
+ gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
+#else
+ gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
+ 0x7fffffffffffUL;
+#endif
+ gru_start_vaddr = __va(gru_start_paddr);
+ gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
+ printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
+ gru_start_paddr, gru_end_paddr);
+ ret = misc_register(&gru_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: misc_register failed\n",
+ GRU_DRIVER_ID_STR);
+ goto exit0;
+ }
+
+ ret = gru_proc_init();
+ if (ret) {
+ printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
+ goto exit1;
+ }
+
+ ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
+ if (ret) {
+ printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
+ goto exit2;
+ }
+
+ ret = gru_setup_tlb_irqs();
+ if (ret != 0)
+ goto exit3;
+
+ gru_kservices_init();
+
+ printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
+ GRU_DRIVER_VERSION_STR);
+ return 0;
+
+exit3:
+ gru_free_tables();
+exit2:
+ gru_proc_exit();
+exit1:
+ misc_deregister(&gru_miscdev);
+exit0:
+	return ret;
+}
+
+static void __exit gru_exit(void)
+{
+ if (!is_uv_system())
+ return;
+
+ gru_teardown_tlb_irqs();
+ gru_kservices_exit();
+ gru_free_tables();
+ misc_deregister(&gru_miscdev);
+ gru_proc_exit();
+}
+
+static const struct file_operations gru_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = gru_file_unlocked_ioctl,
+ .mmap = gru_file_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice gru_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "gru",
+ .fops = &gru_fops,
+};
+
+const struct vm_operations_struct gru_vm_ops = {
+ .close = gru_vma_close,
+ .fault = gru_fault,
+};
+
+#ifndef MODULE
+fs_initcall(gru_init);
+#else
+module_init(gru_init);
+#endif
+module_exit(gru_exit);
+
+module_param(gru_options, ulong, 0644);
+MODULE_PARM_DESC(gru_options, "Various debug options");
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
+MODULE_VERSION(GRU_DRIVER_VERSION_STR);
+
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
new file mode 100644
index 00000000..2f30badc
--- /dev/null
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -0,0 +1,216 @@
+/*
+ * GRU KERNEL MCS INSTRUCTIONS
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include "gru.h"
+#include "grulib.h"
+#include "grutables.h"
+
+/* 10 sec */
+#ifdef CONFIG_IA64
+#include <asm/processor.h>
+#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq) * 10)
+#define CLKS2NSEC(c) ((c) * 1000000000 / local_cpu_data->itc_freq)
+#else
+#include <asm/tsc.h>
+#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz * 10 * 1000)
+#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
+#endif
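+
+/*
+ * Worked example (editor's illustration): with tsc_khz == 2,000,000
+ * (a 2 GHz TSC), GRU_OPERATION_TIMEOUT is 2,000,000 * 10 * 1000 =
+ * 2e10 cycles, i.e. 10 seconds, and CLKS2NSEC(c) reduces to c / 2,
+ * i.e. 0.5 ns per cycle, as expected for a 2 GHz clock.
+ */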
+
+/* Extract the status field from a kernel handle */
+#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3)
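+/*
+ * Editor's note: in the CCH, TGH and TFH layouts (see gruhandles.h) the
+ * first 16 bits of DW 0 hold cmd/delresp/opc/etc., and bits 17:16 hold
+ * the 2-bit status field; the shift and mask above extract exactly that
+ * field regardless of handle type.
+ */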
+
+struct mcs_op_statistic mcs_op_statistics[mcsop_last];
+
+static void update_mcs_stats(enum mcs_op op, unsigned long clks)
+{
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
+ atomic_long_inc(&mcs_op_statistics[op].count);
+ atomic_long_add(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
+}
+
+static void start_instruction(void *h)
+{
+ unsigned long *w0 = h;
+
+ wmb(); /* setting CMD/STATUS bits must be last */
+ *w0 = *w0 | 0x20001;
+ gru_flush_cache(h);
+}
+
+static void report_instruction_timeout(void *h)
+{
+ unsigned long goff = GSEGPOFF((unsigned long)h);
+ char *id = "???";
+
+ if (TYPE_IS(CCH, goff))
+ id = "CCH";
+ else if (TYPE_IS(TGH, goff))
+ id = "TGH";
+ else if (TYPE_IS(TFH, goff))
+ id = "TFH";
+
+	panic("GRU %p (%s) is malfunctioning\n", h, id);
+}
+
+static int wait_instruction_complete(void *h, enum mcs_op opc)
+{
+ int status;
+ unsigned long start_time = get_cycles();
+
+ while (1) {
+ cpu_relax();
+ status = GET_MSEG_HANDLE_STATUS(h);
+ if (status != CCHSTATUS_ACTIVE)
+ break;
+ if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
+ report_instruction_timeout(h);
+ start_time = get_cycles();
+ }
+ }
+ if (gru_options & OPT_STATS)
+ update_mcs_stats(opc, get_cycles() - start_time);
+ return status;
+}
+
+int cch_allocate(struct gru_context_configuration_handle *cch)
+{
+ int ret;
+
+ cch->opc = CCHOP_ALLOCATE;
+ start_instruction(cch);
+ ret = wait_instruction_complete(cch, cchop_allocate);
+
+ /*
+ * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
+ * The GSEG memory does not exist until the ALLOCATE completes.
+ */
+ sync_core();
+ return ret;
+}
+
+int cch_start(struct gru_context_configuration_handle *cch)
+{
+ cch->opc = CCHOP_START;
+ start_instruction(cch);
+ return wait_instruction_complete(cch, cchop_start);
+}
+
+int cch_interrupt(struct gru_context_configuration_handle *cch)
+{
+ cch->opc = CCHOP_INTERRUPT;
+ start_instruction(cch);
+ return wait_instruction_complete(cch, cchop_interrupt);
+}
+
+int cch_deallocate(struct gru_context_configuration_handle *cch)
+{
+ int ret;
+
+ cch->opc = CCHOP_DEALLOCATE;
+ start_instruction(cch);
+ ret = wait_instruction_complete(cch, cchop_deallocate);
+
+ /*
+ * Stop speculation into the GSEG being unmapped by the previous
+ * DEALLOCATE.
+ */
+ sync_core();
+ return ret;
+}
+
+int cch_interrupt_sync(struct gru_context_configuration_handle *cch)
+{
+ cch->opc = CCHOP_INTERRUPT_SYNC;
+ start_instruction(cch);
+ return wait_instruction_complete(cch, cchop_interrupt_sync);
+}
+
+int tgh_invalidate(struct gru_tlb_global_handle *tgh,
+ unsigned long vaddr, unsigned long vaddrmask,
+ int asid, int pagesize, int global, int n,
+ unsigned short ctxbitmap)
+{
+ tgh->vaddr = vaddr;
+ tgh->asid = asid;
+ tgh->pagesize = pagesize;
+ tgh->n = n;
+ tgh->global = global;
+ tgh->vaddrmask = vaddrmask;
+ tgh->ctxbitmap = ctxbitmap;
+ tgh->opc = TGHOP_TLBINV;
+ start_instruction(tgh);
+ return wait_instruction_complete(tgh, tghop_invalidate);
+}
+
+int tfh_write_only(struct gru_tlb_fault_handle *tfh,
+ unsigned long paddr, int gaa,
+ unsigned long vaddr, int asid, int dirty,
+ int pagesize)
+{
+ tfh->fillasid = asid;
+ tfh->fillvaddr = vaddr;
+ tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+ tfh->gaa = gaa;
+ tfh->dirty = dirty;
+ tfh->pagesize = pagesize;
+ tfh->opc = TFHOP_WRITE_ONLY;
+ start_instruction(tfh);
+ return wait_instruction_complete(tfh, tfhop_write_only);
+}
+
+void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
+ unsigned long paddr, int gaa,
+ unsigned long vaddr, int asid, int dirty,
+ int pagesize)
+{
+ tfh->fillasid = asid;
+ tfh->fillvaddr = vaddr;
+ tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+ tfh->gaa = gaa;
+ tfh->dirty = dirty;
+ tfh->pagesize = pagesize;
+ tfh->opc = TFHOP_WRITE_RESTART;
+ start_instruction(tfh);
+}
+
+void tfh_restart(struct gru_tlb_fault_handle *tfh)
+{
+ tfh->opc = TFHOP_RESTART;
+ start_instruction(tfh);
+}
+
+void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
+{
+ tfh->opc = TFHOP_USER_POLLING_MODE;
+ start_instruction(tfh);
+}
+
+void tfh_exception(struct gru_tlb_fault_handle *tfh)
+{
+ tfh->opc = TFHOP_EXCEPTION;
+ start_instruction(tfh);
+}
+
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
new file mode 100644
index 00000000..3f998b92
--- /dev/null
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -0,0 +1,531 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * GRU HANDLE DEFINITION
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __GRUHANDLES_H__
+#define __GRUHANDLES_H__
+#include "gru_instructions.h"
+
+/*
+ * Manifest constants for GRU Memory Map
+ */
+#define GRU_GSEG0_BASE 0
+#define GRU_MCS_BASE (64 * 1024 * 1024)
+#define GRU_SIZE (128UL * 1024 * 1024)
+
+/* Handle & resource counts */
+#define GRU_NUM_CB 128
+#define GRU_NUM_DSR_BYTES (32 * 1024)
+#define GRU_NUM_TFM 16
+#define GRU_NUM_TGH 24
+#define GRU_NUM_CBE 128
+#define GRU_NUM_TFH 128
+#define GRU_NUM_CCH 16
+
+/* Maximum resource counts that can be reserved by user programs */
+#define GRU_NUM_USER_CBR GRU_NUM_CBE
+#define GRU_NUM_USER_DSR_BYTES GRU_NUM_DSR_BYTES
+
+/* Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
+ * are the same */
+#define GRU_HANDLE_BYTES 64
+#define GRU_HANDLE_STRIDE 256
+
+/* Base addresses of handles */
+#define GRU_TFM_BASE (GRU_MCS_BASE + 0x00000)
+#define GRU_TGH_BASE (GRU_MCS_BASE + 0x08000)
+#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000)
+#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000)
+#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000)
+
+/* User gseg constants */
+#define GRU_GSEG_STRIDE (4 * 1024 * 1024)
+#define GSEG_BASE(a) ((a) & ~(GRU_GSEG_PAGESIZE - 1))
+
+/* Data segment constants */
+#define GRU_DSR_AU_BYTES 1024
+#define GRU_DSR_CL (GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
+#define GRU_DSR_AU_CL (GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
+#define GRU_DSR_AU (GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)
+
+/* Control block constants */
+#define GRU_CBR_AU_SIZE 2
+#define GRU_CBR_AU (GRU_NUM_CBE / GRU_CBR_AU_SIZE)
+
+/* Convert resource counts to the number of AU */
+#define GRU_DS_BYTES_TO_AU(n) DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
+#define GRU_CB_COUNT_TO_AU(n) DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)
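+
+/*
+ * Worked example (editor's illustration): with GRU_DSR_AU_BYTES == 1024,
+ * GRU_DS_BYTES_TO_AU(3000) == 3, and with GRU_CBR_AU_SIZE == 2,
+ * GRU_CB_COUNT_TO_AU(5) == 3; partial allocation units always round up.
+ */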
+
+/* UV limits */
+#define GRU_CHIPLETS_PER_HUB 2
+#define GRU_HUBS_PER_BLADE 1
+#define GRU_CHIPLETS_PER_BLADE (GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)
+
+/* User GRU Gseg offsets */
+#define GRU_CB_BASE 0
+#define GRU_CB_LIMIT (GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
+#define GRU_DS_BASE 0x20000
+#define GRU_DS_LIMIT (GRU_DS_BASE + GRU_NUM_DSR_BYTES)
+
+/* Convert a GRU physical address to the chiplet offset */
+#define GSEGPOFF(h) ((h) & (GRU_SIZE - 1))
+
+/* Convert an arbitrary handle address to the beginning of the GRU segment */
+#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
+
+/* Test a valid handle address to determine the type */
+#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
+ GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
+ (((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
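+
+/*
+ * Example (editor's illustration): TYPE_IS(CCH, GSEGPOFF(h)) is true iff
+ * the chiplet offset of h lies in [GRU_CCH_BASE, GRU_CCH_BASE +
+ * GRU_NUM_CCH * GRU_HANDLE_STRIDE) and is aligned to the 256-byte
+ * handle stride.
+ */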
+
+
+/* General addressing macros. */
+static inline void *get_gseg_base_address(void *base, int ctxnum)
+{
+ return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum);
+}
+
+static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line)
+{
+ return (void *)(get_gseg_base_address(base, ctxnum) +
+ GRU_CB_BASE + GRU_HANDLE_STRIDE * line);
+}
+
+static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line)
+{
+ return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE +
+ GRU_CACHE_LINE_BYTES * line);
+}
+
+static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum)
+{
+ return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE +
+ ctxnum * GRU_HANDLE_STRIDE);
+}
+
+static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum)
+{
+ return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE +
+ ctxnum * GRU_HANDLE_STRIDE);
+}
+
+static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum)
+{
+ return (struct gru_control_block_extended *)(base + GRU_CBE_BASE +
+ ctxnum * GRU_HANDLE_STRIDE);
+}
+
+static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum)
+{
+ return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE +
+ ctxnum * GRU_HANDLE_STRIDE);
+}
+
+static inline struct gru_context_configuration_handle *get_cch(void *base,
+ int ctxnum)
+{
+ return (struct gru_context_configuration_handle *)(base +
+ GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE);
+}
+
+static inline unsigned long get_cb_number(void *cb)
+{
+ return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
+ GRU_HANDLE_STRIDE;
+}
+
+/* Byte offset to a specific GRU chiplet (p = pnode, c = chiplet 0 or 1) */
+static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
+ int chiplet)
+{
+ return paddr + GRU_SIZE * (2 * pnode + chiplet);
+}
+
+static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
+{
+ return vaddr + GRU_SIZE * (2 * pnode + chiplet);
+}
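+
+/*
+ * Example (editor's illustration): with GRU_SIZE == 128MB, chiplet 1 of
+ * pnode 2 is at offset 128MB * (2 * 2 + 1) = 640MB from the base.
+ */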
+
+static inline struct gru_control_block_extended *gru_tfh_to_cbe(
+ struct gru_tlb_fault_handle *tfh)
+{
+ unsigned long cbe;
+
+ cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
+	return (struct gru_control_block_extended *)cbe;
+}
+
+/*
+ * Global TLB Fault Map
+ * Bitmap of outstanding TLB misses needing interrupt/polling service.
+ *
+ */
+struct gru_tlb_fault_map {
+ unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
+ unsigned long fill0[2];
+ unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
+ unsigned long fill1[2];
+};
+
+/*
+ * TGH - TLB Global Handle
+ * Used for TLB flushing.
+ *
+ */
+struct gru_tlb_global_handle {
+ unsigned int cmd:1; /* DW 0 */
+ unsigned int delresp:1;
+ unsigned int opc:1;
+ unsigned int fill1:5;
+
+ unsigned int fill2:8;
+
+ unsigned int status:2;
+ unsigned long fill3:2;
+ unsigned int state:3;
+ unsigned long fill4:1;
+
+ unsigned int cause:3;
+ unsigned long fill5:37;
+
+ unsigned long vaddr:64; /* DW 1 */
+
+ unsigned int asid:24; /* DW 2 */
+ unsigned int fill6:8;
+
+ unsigned int pagesize:5;
+ unsigned int fill7:11;
+
+ unsigned int global:1;
+ unsigned int fill8:15;
+
+ unsigned long vaddrmask:39; /* DW 3 */
+ unsigned int fill9:9;
+ unsigned int n:10;
+ unsigned int fill10:6;
+
+ unsigned int ctxbitmap:16; /* DW4 */
+ unsigned long fill11[3];
+};
+
+enum gru_tgh_cmd {
+ TGHCMD_START
+};
+
+enum gru_tgh_opc {
+ TGHOP_TLBNOP,
+ TGHOP_TLBINV
+};
+
+enum gru_tgh_status {
+ TGHSTATUS_IDLE,
+ TGHSTATUS_EXCEPTION,
+ TGHSTATUS_ACTIVE
+};
+
+enum gru_tgh_state {
+ TGHSTATE_IDLE,
+ TGHSTATE_PE_INVAL,
+ TGHSTATE_INTERRUPT_INVAL,
+ TGHSTATE_WAITDONE,
+ TGHSTATE_RESTART_CTX,
+};
+
+enum gru_tgh_cause {
+ TGHCAUSE_RR_ECC,
+ TGHCAUSE_TLB_ECC,
+ TGHCAUSE_LRU_ECC,
+ TGHCAUSE_PS_ECC,
+ TGHCAUSE_MUL_ERR,
+ TGHCAUSE_DATA_ERR,
+ TGHCAUSE_SW_FORCE
+};
+
+
+/*
+ * TFH - TLB Fault Handle
+ * Used for TLB dropins into the GRU TLB.
+ *
+ */
+struct gru_tlb_fault_handle {
+ unsigned int cmd:1; /* DW 0 - low 32*/
+ unsigned int delresp:1;
+ unsigned int fill0:2;
+ unsigned int opc:3;
+ unsigned int fill1:9;
+
+ unsigned int status:2;
+ unsigned int fill2:2;
+ unsigned int state:3;
+ unsigned int fill3:1;
+
+ unsigned int cause:6;
+ unsigned int cb_int:1;
+ unsigned int fill4:1;
+
+ unsigned int indexway:12; /* DW 0 - high 32 */
+ unsigned int fill5:4;
+
+ unsigned int ctxnum:4;
+ unsigned int fill6:12;
+
+ unsigned long missvaddr:64; /* DW 1 */
+
+ unsigned int missasid:24; /* DW 2 */
+ unsigned int fill7:8;
+ unsigned int fillasid:24;
+ unsigned int dirty:1;
+ unsigned int gaa:2;
+ unsigned long fill8:5;
+
+ unsigned long pfn:41; /* DW 3 */
+ unsigned int fill9:7;
+ unsigned int pagesize:5;
+ unsigned int fill10:11;
+
+ unsigned long fillvaddr:64; /* DW 4 */
+
+ unsigned long fill11[3];
+};
+
+enum gru_tfh_opc {
+ TFHOP_NOOP,
+ TFHOP_RESTART,
+ TFHOP_WRITE_ONLY,
+ TFHOP_WRITE_RESTART,
+ TFHOP_EXCEPTION,
+ TFHOP_USER_POLLING_MODE = 7,
+};
+
+enum tfh_status {
+ TFHSTATUS_IDLE,
+ TFHSTATUS_EXCEPTION,
+ TFHSTATUS_ACTIVE,
+};
+
+enum tfh_state {
+ TFHSTATE_INACTIVE,
+ TFHSTATE_IDLE,
+ TFHSTATE_MISS_UPM,
+ TFHSTATE_MISS_FMM,
+ TFHSTATE_HW_ERR,
+ TFHSTATE_WRITE_TLB,
+ TFHSTATE_RESTART_CBR,
+};
+
+/* TFH cause bits */
+enum tfh_cause {
+ TFHCAUSE_NONE,
+ TFHCAUSE_TLB_MISS,
+ TFHCAUSE_TLB_MOD,
+ TFHCAUSE_HW_ERROR_RR,
+ TFHCAUSE_HW_ERROR_MAIN_ARRAY,
+ TFHCAUSE_HW_ERROR_VALID,
+ TFHCAUSE_HW_ERROR_PAGESIZE,
+ TFHCAUSE_INSTRUCTION_EXCEPTION,
+ TFHCAUSE_UNCORRECTIBLE_ERROR,
+};
+
+/* GAA values */
+#define GAA_RAM 0x0
+#define GAA_NCRAM 0x2
+#define GAA_MMIO 0x1
+#define GAA_REGISTER 0x3
+
+/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
+#define GRU_PADDR_SHIFT 12
+
+/*
+ * Context Configuration handle
+ * Used to allocate resources to a GSEG context.
+ *
+ */
+struct gru_context_configuration_handle {
+ unsigned int cmd:1; /* DW0 */
+ unsigned int delresp:1;
+ unsigned int opc:3;
+ unsigned int unmap_enable:1;
+ unsigned int req_slice_set_enable:1;
+ unsigned int req_slice:2;
+ unsigned int cb_int_enable:1;
+ unsigned int tlb_int_enable:1;
+ unsigned int tfm_fault_bit_enable:1;
+ unsigned int tlb_int_select:4;
+
+ unsigned int status:2;
+ unsigned int state:2;
+ unsigned int reserved2:4;
+
+ unsigned int cause:4;
+ unsigned int tfm_done_bit_enable:1;
+ unsigned int unused:3;
+
+ unsigned int dsr_allocation_map;
+
+ unsigned long cbr_allocation_map; /* DW1 */
+
+ unsigned int asid[8]; /* DW 2 - 5 */
+ unsigned short sizeavail[8]; /* DW 6 - 7 */
+} __attribute__ ((packed));
+
+enum gru_cch_opc {
+ CCHOP_START = 1,
+ CCHOP_ALLOCATE,
+ CCHOP_INTERRUPT,
+ CCHOP_DEALLOCATE,
+ CCHOP_INTERRUPT_SYNC,
+};
+
+enum gru_cch_status {
+ CCHSTATUS_IDLE,
+ CCHSTATUS_EXCEPTION,
+ CCHSTATUS_ACTIVE,
+};
+
+enum gru_cch_state {
+ CCHSTATE_INACTIVE,
+ CCHSTATE_MAPPED,
+ CCHSTATE_ACTIVE,
+ CCHSTATE_INTERRUPTED,
+};
+
+/* CCH Exception cause */
+enum gru_cch_cause {
+ CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
+ CCHCAUSE_ILLEGAL_OPCODE = 2,
+ CCHCAUSE_INVALID_START_REQUEST = 3,
+ CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
+ CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
+ CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
+ CCHCAUSE_CCH_BUSY = 7,
+ CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
+ CCHCAUSE_BAD_TFM_CONFIG = 9,
+ CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
+ CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
+ CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
+};
+
+/*
+ * CBE - Control Block Extended
+ * Maintains internal GRU state for active CBs.
+ *
+ */
+struct gru_control_block_extended {
+ unsigned int reserved0:1; /* DW 0 - low */
+ unsigned int imacpy:3;
+ unsigned int reserved1:4;
+ unsigned int xtypecpy:3;
+ unsigned int iaa0cpy:2;
+ unsigned int iaa1cpy:2;
+ unsigned int reserved2:1;
+ unsigned int opccpy:8;
+ unsigned int exopccpy:8;
+
+ unsigned int idef2cpy:22; /* DW 0 - high */
+ unsigned int reserved3:10;
+
+ unsigned int idef4cpy:22; /* DW 1 */
+ unsigned int reserved4:10;
+ unsigned int idef4upd:22;
+ unsigned int reserved5:10;
+
+ unsigned long idef1upd:64; /* DW 2 */
+
+ unsigned long idef5cpy:64; /* DW 3 */
+
+ unsigned long idef6cpy:64; /* DW 4 */
+
+ unsigned long idef3upd:64; /* DW 5 */
+
+ unsigned long idef5upd:64; /* DW 6 */
+
+ unsigned int idef2upd:22; /* DW 7 */
+ unsigned int reserved6:10;
+
+ unsigned int ecause:20;
+ unsigned int cbrstate:4;
+ unsigned int cbrexecstatus:8;
+};
+
+/* CBE fields for active BCOPY instructions */
+#define cbe_baddr0 idef1upd
+#define cbe_baddr1 idef3upd
+#define cbe_src_cl idef6cpy
+#define cbe_nelemcur idef5upd
+
+enum gru_cbr_state {
+ CBRSTATE_INACTIVE,
+ CBRSTATE_IDLE,
+ CBRSTATE_PE_CHECK,
+ CBRSTATE_QUEUED,
+ CBRSTATE_WAIT_RESPONSE,
+ CBRSTATE_INTERRUPTED,
+ CBRSTATE_INTERRUPTED_MISS_FMM,
+ CBRSTATE_BUSY_INTERRUPT_MISS_FMM,
+ CBRSTATE_INTERRUPTED_MISS_UPM,
+ CBRSTATE_BUSY_INTERRUPTED_MISS_UPM,
+ CBRSTATE_REQUEST_ISSUE,
+ CBRSTATE_BUSY_INTERRUPT,
+};
+
+/* CBE cbrexecstatus bits - defined in gru_instructions.h*/
+/* CBE ecause bits - defined in gru_instructions.h */
+
+/*
+ * Convert a processor pagesize into the strange encoded pagesize used by the
+ * GRU. Processor pagesize is encoded as the log2 of bytes per page (i.e., PAGE_SHIFT).
+ * pagesize log pagesize grupagesize
+ * 4k 12 0
+ * 16k 14 1
+ * 64k 16 2
+ * 256k 18 3
+ * 1m 20 4
+ * 2m 21 5
+ * 4m 22 6
+ * 16m 24 7
+ * 64m 26 8
+ * ...
+ */
+#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
+#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh))
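+
+/*
+ * Worked example (editor's illustration): GRU_PAGESIZE(12) == (12 >> 1)
+ * - 6 == 0 (4k) and GRU_PAGESIZE(21) == ((21 + 2) >> 1) - 6 == 5 (2m),
+ * matching the table above. GRU_SIZEAVAIL() converts the encoding to a
+ * single bit suitable for the CCH sizeavail[] mask.
+ */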
+
+/* minimum TLB purge count to ensure a full purge */
+#define GRUMAXINVAL 1024UL
+
+int cch_allocate(struct gru_context_configuration_handle *cch);
+int cch_start(struct gru_context_configuration_handle *cch);
+int cch_interrupt(struct gru_context_configuration_handle *cch);
+int cch_deallocate(struct gru_context_configuration_handle *cch);
+int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
+int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
+ unsigned long vaddrmask, int asid, int pagesize, int global, int n,
+ unsigned short ctxbitmap);
+int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+ int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
+void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+ int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
+void tfh_restart(struct gru_tlb_fault_handle *tfh);
+void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh);
+void tfh_exception(struct gru_tlb_fault_handle *tfh);
+
+#endif /* __GRUHANDLES_H__ */
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
new file mode 100644
index 00000000..9b2062d1
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -0,0 +1,235 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * Dump GRU State
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <asm/uv/uv_hub.h>
+#include "gru.h"
+#include "grutables.h"
+#include "gruhandles.h"
+#include "grulib.h"
+
+#define CCH_LOCK_ATTEMPTS 10
+
+static int gru_user_copy_handle(void __user **dp, void *s)
+{
+ if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
+ return -1;
+ *dp += GRU_HANDLE_BYTES;
+ return 0;
+}
+
+static int gru_dump_context_data(void *grubase,
+ struct gru_context_configuration_handle *cch,
+ void __user *ubuf, int ctxnum, int dsrcnt,
+ int flush_cbrs)
+{
+ void *cb, *cbe, *tfh, *gseg;
+ int i, scr;
+
+ gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
+ cb = gseg + GRU_CB_BASE;
+ cbe = grubase + GRU_CBE_BASE;
+ tfh = grubase + GRU_TFH_BASE;
+
+ for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
+ if (flush_cbrs)
+ gru_flush_cache(cb);
+ if (gru_user_copy_handle(&ubuf, cb))
+ goto fail;
+ if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
+ goto fail;
+ if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
+ goto fail;
+ cb += GRU_HANDLE_STRIDE;
+ }
+	if (dsrcnt &&
+	    copy_to_user(ubuf, gseg + GRU_DS_BASE,
+			 dsrcnt * GRU_HANDLE_STRIDE))
+		goto fail;
+ return 0;
+
+fail:
+ return -EFAULT;
+}
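+
+/*
+ * Editor's note: each context record written by the dump path below is a
+ * gru_dump_context_header, followed by one 64-byte line for the CCH,
+ * then three lines (CB, TFH, CBE) per allocated CBR, followed by the
+ * data-segment contents when data_opt is set.
+ */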
+
+static int gru_dump_tfm(struct gru_state *gru,
+ void __user *ubuf, void __user *ubufend)
+{
+ struct gru_tlb_fault_map *tfm;
+	int i, bytes;
+
+	bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
+	if (bytes > ubufend - ubuf)
+		return -EFBIG;
+
+ for (i = 0; i < GRU_NUM_TFM; i++) {
+ tfm = get_tfm(gru->gs_gru_base_vaddr, i);
+ if (gru_user_copy_handle(&ubuf, tfm))
+ goto fail;
+ }
+ return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
+
+fail:
+ return -EFAULT;
+}
+
+static int gru_dump_tgh(struct gru_state *gru,
+ void __user *ubuf, void __user *ubufend)
+{
+ struct gru_tlb_global_handle *tgh;
+	int i, bytes;
+
+	bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
+	if (bytes > ubufend - ubuf)
+		return -EFBIG;
+
+ for (i = 0; i < GRU_NUM_TGH; i++) {
+ tgh = get_tgh(gru->gs_gru_base_vaddr, i);
+ if (gru_user_copy_handle(&ubuf, tgh))
+ goto fail;
+ }
+ return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
+
+fail:
+ return -EFAULT;
+}
+
+static int gru_dump_context(struct gru_state *gru, int ctxnum,
+ void __user *ubuf, void __user *ubufend, char data_opt,
+ char lock_cch, char flush_cbrs)
+{
+ struct gru_dump_context_header hdr;
+ struct gru_dump_context_header __user *uhdr = ubuf;
+ struct gru_context_configuration_handle *cch, *ubufcch;
+ struct gru_thread_state *gts;
+ int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
+ void *grubase;
+
+ memset(&hdr, 0, sizeof(hdr));
+ grubase = gru->gs_gru_base_vaddr;
+ cch = get_cch(grubase, ctxnum);
+ for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
+ cch_locked = trylock_cch_handle(cch);
+ if (cch_locked)
+ break;
+ msleep(1);
+ }
+
+ ubuf += sizeof(hdr);
+ ubufcch = ubuf;
+ if (gru_user_copy_handle(&ubuf, cch))
+ goto fail;
+ if (cch_locked)
+ ubufcch->delresp = 0;
+ bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
+
+ if (cch_locked || !lock_cch) {
+ gts = gru->gs_gts[ctxnum];
+ if (gts && gts->ts_vma) {
+ hdr.pid = gts->ts_tgid_owner;
+ hdr.vaddr = gts->ts_vma->vm_start;
+ }
+ if (cch->state != CCHSTATE_INACTIVE) {
+ cbrcnt = hweight64(cch->cbr_allocation_map) *
+ GRU_CBR_AU_SIZE;
+ dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
+ GRU_DSR_AU_CL : 0;
+ }
+ bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
+ if (bytes > ubufend - ubuf)
+ ret = -EFBIG;
+ else
+ ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
+ dsrcnt, flush_cbrs);
+ }
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ if (ret)
+ return ret;
+
+ hdr.magic = GRU_DUMP_MAGIC;
+ hdr.gid = gru->gs_gid;
+ hdr.ctxnum = ctxnum;
+ hdr.cbrcnt = cbrcnt;
+ hdr.dsrcnt = dsrcnt;
+ hdr.cch_locked = cch_locked;
+ if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
+ ret = -EFAULT;
+
+ return ret ? ret : bytes;
+
+fail:
+ unlock_cch_handle(cch);
+ return -EFAULT;
+}
+
+int gru_dump_chiplet_request(unsigned long arg)
+{
+ struct gru_state *gru;
+ struct gru_dump_chiplet_state_req req;
+ void __user *ubuf;
+ void __user *ubufend;
+ int ctxnum, ret, cnt = 0;
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ /* Currently, only dump by gid is implemented */
+ if (req.gid >= gru_max_gids || req.gid < 0)
+ return -EINVAL;
+
+ gru = GID_TO_GRU(req.gid);
+ ubuf = req.buf;
+ ubufend = req.buf + req.buflen;
+
+ ret = gru_dump_tfm(gru, ubuf, ubufend);
+ if (ret < 0)
+ goto fail;
+ ubuf += ret;
+
+ ret = gru_dump_tgh(gru, ubuf, ubufend);
+ if (ret < 0)
+ goto fail;
+ ubuf += ret;
+
+ for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ if (req.ctxnum == ctxnum || req.ctxnum < 0) {
+ ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
+ req.data_opt, req.lock_cch,
+ req.flush_cbrs);
+ if (ret < 0)
+ goto fail;
+ ubuf += ret;
+ cnt++;
+ }
+ }
+
+ if (copy_to_user((void __user *)arg, &req, sizeof(req)))
+ return -EFAULT;
+ return cnt;
+
+fail:
+ return ret;
+}
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
new file mode 100644
index 00000000..913de07e
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -0,0 +1,1162 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * KERNEL SERVICES THAT USE THE GRU
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <asm/io_apic.h>
+#include "gru.h"
+#include "grulib.h"
+#include "grutables.h"
+#include "grukservices.h"
+#include "gru_instructions.h"
+#include <asm/uv/uv_hub.h>
+
+/*
+ * Kernel GRU Usage
+ *
+ * The following is an interim algorithm for management of kernel GRU
+ * resources. This will likely be replaced when we better understand the
+ * kernel/user requirements.
+ *
+ * Blade percpu resources reserved for kernel use. These resources are
+ * reserved whenever the kernel context for the blade is loaded. Note
+ * that the kernel context is not guaranteed to always be available. It is
+ * loaded on demand & can be stolen by a user if user demand exceeds the
+ * kernel demand. The kernel can always reload the kernel context, but
+ * a SLEEP may be required!
+ *
+ * Async Overview:
+ *
+ * Each blade has one "kernel context" that owns GRU kernel resources
+ * located on the blade. Kernel drivers use GRU resources in this context
+ * for sending messages, zeroing memory, etc.
+ *
+ * The kernel context is dynamically loaded on demand. If it is not in
+ * use by the kernel, the kernel context can be unloaded & given to a user.
+ * The kernel context will be reloaded when needed. This may require that
+ * a context be stolen from a user.
+ * NOTE: frequent unloading/reloading of the kernel context is
+ * expensive. We are depending on batch schedulers, cpusets, sane
+ * drivers or some other mechanism to prevent the need for frequent
+ * stealing/reloading.
+ *
+ * The kernel context consists of two parts:
+ * - 1 CB & a few DSRs that are reserved for each cpu on the blade.
+ *	Each cpu has its own private resources & does not share them
+ * with other cpus. These resources are used serially, ie,
+ * locked, used & unlocked on each call to a function in
+ * grukservices.
+ * (Now that we have dynamic loading of kernel contexts, I
+ * may rethink this & allow sharing between cpus....)
+ *
+ * - Additional resources can be reserved long term & used directly
+ * by UV drivers located in the kernel. Drivers using these GRU
+ * resources can use asynchronous GRU instructions that send
+ * interrupts on completion.
+ * - these resources must be explicitly locked/unlocked
+ * - locked resources prevent (obviously) the kernel
+ * context from being unloaded.
+ * - drivers using these resource directly issue their own
+ * GRU instruction and must wait/check completion.
+ *
+ * When these resources are reserved, the caller can optionally
+ * associate a wait_queue with the resources and use asynchronous
+ * GRU instructions. When an async GRU instruction completes, the
+ * driver will do a wakeup on the event.
+ *
+ */
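+
+/*
+ * Illustrative sketch (editor's addition, compiled out): one way a
+ * driver might use the long-term async reservation API described above.
+ * The blade id and resource counts are arbitrary, and the instruction
+ * issued between lock and wait is driver-specific.
+ */
+#if 0
+static void example_async_usage(void)
+{
+	DECLARE_COMPLETION_ONSTACK(cmp);
+	unsigned long han;
+	void *cb, *dsr;
+
+	/* reserve 1 CBR & 256 DSR bytes on blade 0 (0 => already taken) */
+	han = gru_reserve_async_resources(0, 1, 256, &cmp);
+	if (!han)
+		return;
+
+	gru_lock_async_resource(han, &cb, &dsr);
+	/* ... issue an async GRU instruction using cb/dsr here ... */
+	gru_wait_async_cbr(han);		/* woken via the completion */
+	gru_unlock_async_resource(han);
+	gru_release_async_resources(han);
+}
+#endif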
+
+
+#define ASYNC_HAN_TO_BID(h) ((h) - 1)
+#define ASYNC_BID_TO_HAN(b) ((b) + 1)
+#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
+
+#define GRU_NUM_KERNEL_CBR 1
+#define GRU_NUM_KERNEL_DSR_BYTES 256
+#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \
+ GRU_CACHE_LINE_BYTES)
+
+/* GRU instruction attributes for all instructions */
+#define IMA IMA_CB_DELAY
+
+/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
+#define __gru_cacheline_aligned__ \
+ __attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))
+
+#define MAGIC 0x1234567887654321UL
+
+/* Default retry count for GRU errors on kernel instructions */
+#define EXCEPTION_RETRY_LIMIT 3
+
+/* Status of message queue sections */
+#define MQS_EMPTY 0
+#define MQS_FULL 1
+#define MQS_NOOP 2
+
+/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
+/* optimized for x86_64 */
+struct message_queue {
+ union gru_mesqhead head __gru_cacheline_aligned__; /* CL 0 */
+ int qlines; /* DW 1 */
+ long hstatus[2];
+ void *next __gru_cacheline_aligned__;/* CL 1 */
+ void *limit;
+ void *start;
+ void *start2;
+ char data ____cacheline_aligned; /* CL 2 */
+};
+
+/* First word in every message - used by mesq interface */
+struct message_header {
+ char present;
+ char present2;
+ char lines;
+ char fill;
+};
+
+#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
+
+/*
+ * Reload the blade's kernel context into a GRU chiplet. Called holding
+ * the bs_kgts_sema for READ. Will steal user contexts if necessary.
+ */
+static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
+{
+ struct gru_state *gru;
+ struct gru_thread_state *kgts;
+ void *vaddr;
+ int ctxnum, ncpus;
+
+ up_read(&bs->bs_kgts_sema);
+ down_write(&bs->bs_kgts_sema);
+
+ if (!bs->bs_kgts) {
+ bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+ bs->bs_kgts->ts_user_blade_id = blade_id;
+ }
+ kgts = bs->bs_kgts;
+
+ if (!kgts->ts_gru) {
+ STAT(load_kernel_context);
+ ncpus = uv_blade_nr_possible_cpus(blade_id);
+ kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
+ GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
+ kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
+ GRU_NUM_KERNEL_DSR_BYTES * ncpus +
+ bs->bs_async_dsr_bytes);
+ while (!gru_assign_gru_context(kgts)) {
+ msleep(1);
+ gru_steal_context(kgts);
+ }
+ gru_load_context(kgts);
+ gru = bs->bs_kgts->ts_gru;
+ vaddr = gru->gs_gru_base_vaddr;
+ ctxnum = kgts->ts_ctxnum;
+ bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
+ bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
+ }
+ downgrade_write(&bs->bs_kgts_sema);
+}
+
+/*
+ * Free all kernel contexts that are not currently in use.
+ * Returns 0 if all freed, else number of inuse context.
+ */
+static int gru_free_kernel_contexts(void)
+{
+ struct gru_blade_state *bs;
+ struct gru_thread_state *kgts;
+ int bid, ret = 0;
+
+ for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
+ bs = gru_base[bid];
+ if (!bs)
+ continue;
+
+ /* Ignore busy contexts. Don't want to block here. */
+ if (down_write_trylock(&bs->bs_kgts_sema)) {
+ kgts = bs->bs_kgts;
+ if (kgts && kgts->ts_gru)
+ gru_unload_context(kgts, 0);
+ bs->bs_kgts = NULL;
+ up_write(&bs->bs_kgts_sema);
+ kfree(kgts);
+ } else {
+ ret++;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Lock & load the kernel context for the specified blade.
+ */
+static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
+{
+ struct gru_blade_state *bs;
+ int bid;
+
+ STAT(lock_kernel_context);
+again:
+ bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+ bs = gru_base[bid];
+
+ /* Handle the case where migration occurred while waiting for the sema */
+ down_read(&bs->bs_kgts_sema);
+ if (blade_id < 0 && bid != uv_numa_blade_id()) {
+ up_read(&bs->bs_kgts_sema);
+ goto again;
+ }
+ if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
+ gru_load_kernel_context(bs, bid);
+	return bs;
+}
+
+/*
+ * Unlock the kernel context for the specified blade. Context is not
+ * unloaded but may be stolen before next use.
+ */
+static void gru_unlock_kernel_context(int blade_id)
+{
+ struct gru_blade_state *bs;
+
+ bs = gru_base[blade_id];
+ up_read(&bs->bs_kgts_sema);
+ STAT(unlock_kernel_context);
+}
+
+/*
+ * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
+ * - returns with preemption disabled
+ */
+static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
+{
+ struct gru_blade_state *bs;
+ int lcpu;
+
+ BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
+ preempt_disable();
+ bs = gru_lock_kernel_context(-1);
+ lcpu = uv_blade_processor_id();
+ *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
+ *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
+ return 0;
+}
+
+/*
+ * Free the current cpus reserved DSR/CBR resources.
+ */
+static void gru_free_cpu_resources(void *cb, void *dsr)
+{
+ gru_unlock_kernel_context(uv_numa_blade_id());
+ preempt_enable();
+}
+
+/*
+ * Reserve GRU resources to be used asynchronously.
+ * Note: currently supports only 1 reservation per blade.
+ *
+ * input:
+ * blade_id - blade on which resources should be reserved
+ * cbrs - number of CBRs
+ * dsr_bytes - number of DSR bytes needed
+ * output:
+ * handle to identify resource
+ * (0 = async resources already reserved)
+ */
+unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
+ struct completion *cmp)
+{
+ struct gru_blade_state *bs;
+ struct gru_thread_state *kgts;
+ int ret = 0;
+
+ bs = gru_base[blade_id];
+
+ down_write(&bs->bs_kgts_sema);
+
+ /* Verify no resources already reserved */
+ if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
+ goto done;
+ bs->bs_async_dsr_bytes = dsr_bytes;
+ bs->bs_async_cbrs = cbrs;
+ bs->bs_async_wq = cmp;
+ kgts = bs->bs_kgts;
+
+ /* Resources changed. Unload context if already loaded */
+ if (kgts && kgts->ts_gru)
+ gru_unload_context(kgts, 0);
+ ret = ASYNC_BID_TO_HAN(blade_id);
+
+done:
+ up_write(&bs->bs_kgts_sema);
+ return ret;
+}
+
+/*
+ * Release async resources previously reserved.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+void gru_release_async_resources(unsigned long han)
+{
+ struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
+
+ down_write(&bs->bs_kgts_sema);
+ bs->bs_async_dsr_bytes = 0;
+ bs->bs_async_cbrs = 0;
+ bs->bs_async_wq = NULL;
+ up_write(&bs->bs_kgts_sema);
+}
+
+/*
+ * Wait for async GRU instructions to complete.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+void gru_wait_async_cbr(unsigned long han)
+{
+ struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
+
+ wait_for_completion(bs->bs_async_wq);
+ mb();
+}
+
+/*
+ * Lock previous reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ * output:
+ * cb - pointer to first CBR
+ * dsr - pointer to first DSR
+ */
+void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
+{
+ struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
+ int blade_id = ASYNC_HAN_TO_BID(han);
+ int ncpus;
+
+ gru_lock_kernel_context(blade_id);
+ ncpus = uv_blade_nr_possible_cpus(blade_id);
+ if (cb)
+ *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
+ if (dsr)
+ *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
+}
+
+/*
+ * Unlock previous reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ */
+void gru_unlock_async_resource(unsigned long han)
+{
+ int blade_id = ASYNC_HAN_TO_BID(han);
+
+ gru_unlock_kernel_context(blade_id);
+}
+
+/*----------------------------------------------------------------------*/
+int gru_get_cb_exception_detail(void *cb,
+ struct control_block_extended_exc_detail *excdet)
+{
+ struct gru_control_block_extended *cbe;
+ struct gru_thread_state *kgts = NULL;
+ unsigned long off;
+ int cbrnum, bid;
+
+ /*
+ * Locate kgts for cb. This algorithm is SLOW but
+	 * this function is rarely called (i.e., almost never).
+ * Performance does not matter.
+ */
+ for_each_possible_blade(bid) {
+ if (!gru_base[bid])
+ break;
+ kgts = gru_base[bid]->bs_kgts;
+ if (!kgts || !kgts->ts_gru)
+ continue;
+ off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+ if (off < GRU_SIZE)
+ break;
+ kgts = NULL;
+ }
+ BUG_ON(!kgts);
+ cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
+ cbe = get_cbe(GRUBASE(cb), cbrnum);
+ gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core();
+ excdet->opc = cbe->opccpy;
+ excdet->exopc = cbe->exopccpy;
+ excdet->ecause = cbe->ecause;
+ excdet->exceptdet0 = cbe->idef1upd;
+ excdet->exceptdet1 = cbe->idef3upd;
+ gru_flush_cache(cbe);
+ return 0;
+}
+
+char *gru_get_cb_exception_detail_str(int ret, void *cb,
+ char *buf, int size)
+{
+ struct gru_control_block_status *gen = (void *)cb;
+ struct control_block_extended_exc_detail excdet;
+
+ if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
+ gru_get_cb_exception_detail(cb, &excdet);
+ snprintf(buf, size,
+			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
+			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
+ gen, excdet.opc, excdet.exopc, excdet.ecause,
+ excdet.exceptdet0, excdet.exceptdet1);
+ } else {
+ snprintf(buf, size, "No exception");
+ }
+ return buf;
+}
+
+static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
+{
+ while (gen->istatus >= CBS_ACTIVE) {
+ cpu_relax();
+ barrier();
+ }
+ return gen->istatus;
+}
+
+static int gru_retry_exception(void *cb)
+{
+ struct gru_control_block_status *gen = (void *)cb;
+ struct control_block_extended_exc_detail excdet;
+ int retry = EXCEPTION_RETRY_LIMIT;
+
+ while (1) {
+ if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
+ return CBS_IDLE;
+ if (gru_get_cb_message_queue_substatus(cb))
+ return CBS_EXCEPTION;
+ gru_get_cb_exception_detail(cb, &excdet);
+ if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
+ (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
+ break;
+ if (retry-- == 0)
+ break;
+ gen->icmd = 1;
+ gru_flush_cache(gen);
+ }
+ return CBS_EXCEPTION;
+}
+
+int gru_check_status_proc(void *cb)
+{
+ struct gru_control_block_status *gen = (void *)cb;
+ int ret;
+
+ ret = gen->istatus;
+ if (ret == CBS_EXCEPTION)
+ ret = gru_retry_exception(cb);
+ rmb();
+	return ret;
+}
+
+int gru_wait_proc(void *cb)
+{
+ struct gru_control_block_status *gen = (void *)cb;
+ int ret;
+
+ ret = gru_wait_idle_or_exception(gen);
+ if (ret == CBS_EXCEPTION)
+ ret = gru_retry_exception(cb);
+ rmb();
+ return ret;
+}
+
+void gru_abort(int ret, void *cb, char *str)
+{
+ char buf[GRU_EXC_STR_SIZE];
+
+ panic("GRU FATAL ERROR: %s - %s\n", str,
+ gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
+}
+
+void gru_wait_abort_proc(void *cb)
+{
+ int ret;
+
+ ret = gru_wait_proc(cb);
+ if (ret)
+ gru_abort(ret, cb, "gru_wait_abort");
+}
+
+
+/*------------------------------ MESSAGE QUEUES -----------------------------*/
+
+/* Internal status. These are NOT returned to the user. */
+#define MQIE_AGAIN -1 /* try again */
+
+
+/*
+ * Save/restore the "present" flag that is in the second line of 2-line
+ * messages
+ */
+static inline int get_present2(void *p)
+{
+ struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
+ return mhdr->present;
+}
+
+static inline void restore_present2(void *p, int val)
+{
+ struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
+ mhdr->present = val;
+}
+
+/*
+ * Create a message queue.
+ * qlines - message queue size in cache lines. Includes 2-line header.
+ */
+int gru_create_message_queue(struct gru_message_queue_desc *mqd,
+ void *p, unsigned int bytes, int nasid, int vector, int apicid)
+{
+ struct message_queue *mq = p;
+ unsigned int qlines;
+
+ qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
+ memset(mq, 0, bytes);
+ mq->start = &mq->data;
+ mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
+ mq->next = &mq->data;
+ mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
+ mq->qlines = qlines;
+ mq->hstatus[0] = 0;
+ mq->hstatus[1] = 1;
+ mq->head = gru_mesq_head(2, qlines / 2 + 1);
+ mqd->mq = mq;
+ mqd->mq_gpa = uv_gpa(mq);
+ mqd->qlines = qlines;
+ mqd->interrupt_pnode = nasid >> 1;
+ mqd->interrupt_vector = vector;
+ mqd->interrupt_apicid = apicid;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gru_create_message_queue);
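+
+/*
+ * Illustrative sketch (editor's addition, compiled out): creating a
+ * polled-mode queue. Passing 0 for nasid/vector/apicid leaves
+ * mqd->interrupt_vector clear, so no delivery interrupt is sent (see
+ * send_message_put_nacked() below). The 64-line size is arbitrary, and
+ * the buffer is assumed to be cacheline aligned, which kmalloc()
+ * provides for power-of-2 sizes.
+ */
+#if 0
+static int example_create_mq(struct gru_message_queue_desc *mqd)
+{
+	unsigned int bytes = 64 * GRU_CACHE_LINE_BYTES;
+	void *p = kmalloc(bytes, GFP_KERNEL);
+
+	if (!p)
+		return -ENOMEM;
+	return gru_create_message_queue(mqd, p, bytes, 0, 0, 0);
+}
+#endif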
+
+/*
+ * Send a NOOP message to a message queue
+ * Returns:
+ * 0 - if queue is full after the send. This is the normal case
+ * but various races can change this.
+ * -1 - if mesq sent successfully but queue not full
+ * >0 - unexpected error. MQE_xxx returned
+ */
+static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
+ void *mesg)
+{
+ const struct message_header noop_header = {
+ .present = MQS_NOOP, .lines = 1};
+ unsigned long m;
+ int substatus, ret;
+ struct message_header save_mhdr, *mhdr = mesg;
+
+ STAT(mesq_noop);
+ save_mhdr = *mhdr;
+ *mhdr = noop_header;
+ gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
+ ret = gru_wait(cb);
+
+ if (ret) {
+ substatus = gru_get_cb_message_queue_substatus(cb);
+ switch (substatus) {
+ case CBSS_NO_ERROR:
+ STAT(mesq_noop_unexpected_error);
+ ret = MQE_UNEXPECTED_CB_ERR;
+ break;
+ case CBSS_LB_OVERFLOWED:
+ STAT(mesq_noop_lb_overflow);
+ ret = MQE_CONGESTION;
+ break;
+ case CBSS_QLIMIT_REACHED:
+ STAT(mesq_noop_qlimit_reached);
+ ret = 0;
+ break;
+ case CBSS_AMO_NACKED:
+ STAT(mesq_noop_amo_nacked);
+ ret = MQE_CONGESTION;
+ break;
+ case CBSS_PUT_NACKED:
+ STAT(mesq_noop_put_nacked);
+ m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
+ gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
+ IMA);
+ if (gru_wait(cb) == CBS_IDLE)
+ ret = MQIE_AGAIN;
+ else
+ ret = MQE_UNEXPECTED_CB_ERR;
+ break;
+ case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_noop_page_overflow);
+ /* fallthru */
+ default:
+ BUG();
+ }
+ }
+ *mhdr = save_mhdr;
+ return ret;
+}
+
+/*
+ * Handle a gru_mesq full-queue condition.
+ */
+static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
+ void *mesg, int lines)
+{
+ union gru_mesqhead mqh;
+ unsigned int limit, head;
+ unsigned long avalue;
+ int half, qlines;
+
+ /* Determine if switching to first/second half of q */
+ avalue = gru_get_amo_value(cb);
+ head = gru_get_amo_value_head(cb);
+ limit = gru_get_amo_value_limit(cb);
+
+ qlines = mqd->qlines;
+ half = (limit != qlines);
+
+ if (half)
+ mqh = gru_mesq_head(qlines / 2 + 1, qlines);
+ else
+ mqh = gru_mesq_head(2, qlines / 2 + 1);
+
+ /* Try to get lock for switching head pointer */
+ gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ goto cberr;
+ if (!gru_get_amo_value(cb)) {
+ STAT(mesq_qf_locked);
+ return MQE_QUEUE_FULL;
+ }
+
+ /* Got the lock. Send optional NOOP message if queue not full. */
+ if (head != limit) {
+ if (send_noop_message(cb, mqd, mesg)) {
+ gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
+ XTYPE_DW, IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ goto cberr;
+ STAT(mesq_qf_noop_not_full);
+ return MQIE_AGAIN;
+ }
+ avalue++;
+ }
+
+ /* Then flip the queue head to the other half of the queue. */
+ gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
+ IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ goto cberr;
+
+ /* If swapping the queue head failed, clear the hstatus lock */
+ if (gru_get_amo_value(cb) != avalue) {
+ STAT(mesq_qf_switch_head_failed);
+ gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
+ IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ goto cberr;
+ }
+ return MQIE_AGAIN;
+cberr:
+ STAT(mesq_qf_unexpected_error);
+ return MQE_UNEXPECTED_CB_ERR;
+}
+
+/*
+ * Handle a PUT failure. Note: if the message was a 2-line message, one of
+ * the lines might have been written successfully. Before resending the
+ * message, "present" must be cleared in BOTH lines to prevent the receiver
+ * from prematurely seeing the full message.
+ */
+static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
+ void *mesg, int lines)
+{
+ unsigned long m, *val = mesg, gpa, save;
+ int ret;
+
+ m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
+ if (lines == 2) {
+ gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
+ }
+ gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
+
+ if (!mqd->interrupt_vector)
+ return MQE_OK;
+
+ /*
+ * Send a cross-partition interrupt to the SSI that contains the target
+ * message queue. Normally, the interrupt is automatically delivered by
+ * hardware but some error conditions require explicit delivery.
+ * Use the GRU to deliver the interrupt. Otherwise partition failures
+ * could cause unrecovered errors.
+ */
+ gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
+ save = *val;
+ *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
+ dest_Fixed);
+ gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
+ ret = gru_wait(cb);
+ *val = save;
+ if (ret != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
+ return MQE_OK;
+}
+
+/*
+ * Handle a gru_mesq failure. Some of these failures are software recoverable
+ * or retryable.
+ */
+static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
+ void *mesg, int lines)
+{
+ int substatus, ret = 0;
+
+ substatus = gru_get_cb_message_queue_substatus(cb);
+ switch (substatus) {
+ case CBSS_NO_ERROR:
+ STAT(mesq_send_unexpected_error);
+ ret = MQE_UNEXPECTED_CB_ERR;
+ break;
+ case CBSS_LB_OVERFLOWED:
+ STAT(mesq_send_lb_overflow);
+ ret = MQE_CONGESTION;
+ break;
+ case CBSS_QLIMIT_REACHED:
+ STAT(mesq_send_qlimit_reached);
+ ret = send_message_queue_full(cb, mqd, mesg, lines);
+ break;
+ case CBSS_AMO_NACKED:
+ STAT(mesq_send_amo_nacked);
+ ret = MQE_CONGESTION;
+ break;
+ case CBSS_PUT_NACKED:
+ STAT(mesq_send_put_nacked);
+ ret = send_message_put_nacked(cb, mqd, mesg, lines);
+ break;
+ case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_page_overflow);
+ /* fallthru */
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+/*
+ * Send a message to a message queue
+ * mqd message queue descriptor
+ * mesg message. Must be a vaddr within a GSEG
+ * bytes message size (<= 2 CL)
+ */
+int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
+ unsigned int bytes)
+{
+ struct message_header *mhdr;
+ void *cb;
+ void *dsr;
+ int istatus, clines, ret;
+
+ STAT(mesq_send);
+ BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);
+
+ clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
+ if (gru_get_cpu_resources(bytes, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ memcpy(dsr, mesg, bytes);
+ mhdr = dsr;
+ mhdr->present = MQS_FULL;
+ mhdr->lines = clines;
+ if (clines == 2) {
+ mhdr->present2 = get_present2(mhdr);
+ restore_present2(mhdr, MQS_FULL);
+ }
+
+ do {
+ ret = MQE_OK;
+ gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
+ istatus = gru_wait(cb);
+ if (istatus != CBS_IDLE)
+ ret = send_message_failure(cb, mqd, dsr, clines);
+ } while (ret == MQIE_AGAIN);
+ gru_free_cpu_resources(cb, dsr);
+
+ if (ret)
+ STAT(mesq_send_failed);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gru_send_message_gpa);
+
+/*
+ * Advance the receive pointer for the queue to the next message.
+ */
+void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
+{
+ struct message_queue *mq = mqd->mq;
+ struct message_header *mhdr = mq->next;
+ void *next, *pnext;
+ int half = -1;
+ int lines = mhdr->lines;
+
+ if (lines == 2)
+ restore_present2(mhdr, MQS_EMPTY);
+ mhdr->present = MQS_EMPTY;
+
+ pnext = mq->next;
+ next = pnext + GRU_CACHE_LINE_BYTES * lines;
+ if (next == mq->limit) {
+ next = mq->start;
+ half = 1;
+ } else if (pnext < mq->start2 && next >= mq->start2) {
+ half = 0;
+ }
+
+ if (half >= 0)
+ mq->hstatus[half] = 1;
+ mq->next = next;
+}
+EXPORT_SYMBOL_GPL(gru_free_message);
+
+/*
+ * Get next message from message queue. Return NULL if no message
+ * present. User must call gru_free_message() to move to the next message.
+ * mqd message queue descriptor
+ */
+void *gru_get_next_message(struct gru_message_queue_desc *mqd)
+{
+ struct message_queue *mq = mqd->mq;
+ struct message_header *mhdr = mq->next;
+ int present = mhdr->present;
+
+ /* skip NOOP messages */
+ while (present == MQS_NOOP) {
+ gru_free_message(mqd, mhdr);
+ mhdr = mq->next;
+ present = mhdr->present;
+ }
+
+ /* Wait for both halves of 2 line messages */
+ if (present == MQS_FULL && mhdr->lines == 2 &&
+ get_present2(mhdr) == MQS_EMPTY)
+ present = MQS_EMPTY;
+
+ if (!present) {
+ STAT(mesq_receive_none);
+ return NULL;
+ }
+
+ if (mhdr->lines == 2)
+ restore_present2(mhdr, mhdr->present2);
+
+ STAT(mesq_receive);
+ return mhdr;
+}
+EXPORT_SYMBOL_GPL(gru_get_next_message);
+
+/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
+
+/*
+ * Load a DW from a global GPA. The GPA can be a memory or MMR address.
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa)
+{
+ void *cb;
+ void *dsr;
+ int ret, iaa;
+
+ STAT(read_gpa);
+ if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ iaa = gpa >> 62;
+ gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
+ ret = gru_wait(cb);
+ if (ret == CBS_IDLE)
+ *value = *(unsigned long *)dsr;
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gru_read_gpa);
+
+
+/*
+ * Copy a block of data using the GRU resources
+ */
+int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
+ unsigned int bytes)
+{
+ void *cb;
+ void *dsr;
+ int ret;
+
+ STAT(copy_gpa);
+ if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
+ XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
+ ret = gru_wait(cb);
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gru_copy_gpa);
+
+/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
+/* Temp - will delete after we gain confidence in the GRU */
+
+static int quicktest0(unsigned long arg)
+{
+ unsigned long word0;
+ unsigned long word1;
+ void *cb;
+ void *dsr;
+ unsigned long *p;
+ int ret = -EIO;
+
+ if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ p = dsr;
+ word0 = MAGIC;
+ word1 = 0;
+
+ gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
+ goto done;
+ }
+
+ if (*p != MAGIC) {
+ printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
+ goto done;
+ }
+ gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
+ goto done;
+ }
+
+ if (word0 != word1 || word1 != MAGIC) {
+ printk(KERN_DEBUG
+ "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
+ smp_processor_id(), word1, MAGIC);
+ goto done;
+ }
+ ret = 0;
+
+done:
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
+}
+
+#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))
+
+static int quicktest1(unsigned long arg)
+{
+ struct gru_message_queue_desc mqd;
+ void *p, *mq;
+ unsigned long *dw;
+ int i, ret = -EIO;
+ char mes[GRU_CACHE_LINE_BYTES], *m;
+
+ /* Need 1K of cacheline-aligned memory that does not cross a page boundary */
+ p = kmalloc(4096, GFP_KERNEL);
+ if (p == NULL)
+ return -ENOMEM;
+ mq = ALIGNUP(p, 1024);
+ memset(mes, 0xee, sizeof(mes));
+ dw = mq;
+
+ gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
+ for (i = 0; i < 6; i++) {
+ mes[8] = i;
+ do {
+ ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
+ } while (ret == MQE_CONGESTION);
+ if (ret)
+ break;
+ }
+ if (ret != MQE_QUEUE_FULL || i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n",
+ smp_processor_id(), ret, i);
+ goto done;
+ }
+
+ for (i = 0; i < 6; i++) {
+ m = gru_get_next_message(&mqd);
+ if (!m || m[8] != i)
+ break;
+ gru_free_message(&mqd, m);
+ }
+ if (i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
+ smp_processor_id(), i, m, m ? m[8] : -1);
+ goto done;
+ }
+ ret = 0;
+
+done:
+ kfree(p);
+ return ret;
+}
+
+static int quicktest2(unsigned long arg)
+{
+ static DECLARE_COMPLETION(cmp);
+ unsigned long han;
+ int blade_id = 0;
+ int numcb = 4;
+ int ret = 0;
+ unsigned long *buf;
+ void *cb0, *cb;
+ struct gru_control_block_status *gen;
+ int i, k, istatus, bytes;
+
+ bytes = numcb * 4 * 8;
+ buf = kmalloc(bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = -EBUSY;
+ han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
+ if (!han)
+ goto done;
+
+ gru_lock_async_resource(han, &cb0, NULL);
+ memset(buf, 0xee, bytes);
+ for (i = 0; i < numcb; i++)
+ gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
+ XTYPE_DW, 4, 1, IMA_INTERRUPT);
+
+ ret = 0;
+ k = numcb;
+ do {
+ gru_wait_async_cbr(han);
+ for (i = 0; i < numcb; i++) {
+ cb = cb0 + i * GRU_HANDLE_STRIDE;
+ istatus = gru_check_status(cb);
+ if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
+ break;
+ }
+ if (i == numcb)
+ continue;
+ if (istatus != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
+ ret = -EFAULT;
+ } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
+ buf[4 * i + 3]) {
+ printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
+ smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
+ ret = -EIO;
+ }
+ k--;
+ gen = cb;
+ gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
+ } while (k);
+ BUG_ON(cmp.done);
+
+ gru_unlock_async_resource(han);
+ gru_release_async_resources(han);
+done:
+ kfree(buf);
+ return ret;
+}
+
+#define BUFSIZE 200
+static int quicktest3(unsigned long arg)
+{
+ char buf1[BUFSIZE], buf2[BUFSIZE];
+ int ret = 0;
+
+ memset(buf2, 0, sizeof(buf2));
+ memset(buf1, get_cycles() & 255, sizeof(buf1));
+ gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
+ if (memcmp(buf1, buf2, BUFSIZE)) {
+ printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
+ ret = -EIO;
+ }
+ return ret;
+}
+
+/*
+ * Debugging only. User hook for various kernel tests
+ * of driver & gru.
+ */
+int gru_ktest(unsigned long arg)
+{
+ int ret = -EINVAL;
+
+ switch (arg & 0xff) {
+ case 0:
+ ret = quicktest0(arg);
+ break;
+ case 1:
+ ret = quicktest1(arg);
+ break;
+ case 2:
+ ret = quicktest2(arg);
+ break;
+ case 3:
+ ret = quicktest3(arg);
+ break;
+ case 99:
+ ret = gru_free_kernel_contexts();
+ break;
+ }
+ return ret;
+}
+
+int gru_kservices_init(void)
+{
+ return 0;
+}
+
+void gru_kservices_exit(void)
+{
+ if (gru_free_kernel_contexts())
+ BUG();
+}
+
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
new file mode 100644
index 00000000..02aa94d8
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -0,0 +1,214 @@
+
+/*
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __GRU_KSERVICES_H_
+#define __GRU_KSERVICES_H_
+
+
+/*
+ * Message queues using the GRU to send/receive messages.
+ *
+ * These functions allow the user to create a message queue for
+ * sending/receiving 1 or 2 cacheline messages using the GRU.
+ *
+ * Processes SENDING messages will use a kernel CBR/DSR to send
+ * the message. This is transparent to the caller.
+ *
+ * The receiver does not use any GRU resources.
+ *
+ * The functions support:
+ * - single receiver
+ * - multiple senders
+ * - cross-partition messages
+ *
+ * Missing features ZZZ:
+ * - user options for dealing with timeouts, queue full, etc.
+ * - gru_create_message_queue() needs interrupt vector info
+ */
+
+struct gru_message_queue_desc {
+ void *mq; /* message queue vaddress */
+ unsigned long mq_gpa; /* global address of mq */
+ int qlines; /* queue size in CL */
+ int interrupt_vector; /* interrupt vector */
+ int interrupt_pnode; /* pnode for interrupt */
+ int interrupt_apicid; /* lapicid for interrupt */
+};
+
+/*
+ * Initialize a user allocated chunk of memory to be used as
+ * a message queue. The caller must ensure that the queue is
+ * in contiguous physical memory and is cacheline aligned.
+ *
+ * Message queue size is the total number of bytes allocated
+ * to the queue including a 2 cacheline header that is used
+ * to manage the queue.
+ *
+ * Input:
+ * mqd pointer to message queue descriptor
+ * p pointer to user allocated mesq memory.
+ * bytes size of message queue in bytes
+ * vector interrupt vector (zero if no interrupts)
+ * nasid nasid of blade where interrupt is delivered
+ * apicid apicid of cpu for interrupt
+ *
+ * Errors:
+ * 0 OK
+ * >0 error
+ */
+extern int gru_create_message_queue(struct gru_message_queue_desc *mqd,
+ void *p, unsigned int bytes, int nasid, int vector, int apicid);
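+
+/*
+ * Illustrative sketch (not part of the driver API): create a small
+ * queue with interrupt delivery disabled. The buffer size and error
+ * handling are assumptions for illustration only; the buffer must be
+ * physically contiguous and cacheline aligned.
+ *
+ *     struct gru_message_queue_desc mqd;
+ *     void *buf = kmalloc(8 * GRU_CACHE_LINE_BYTES, GFP_KERNEL);
+ *
+ *     if (!buf)
+ *             return -ENOMEM;
+ *     gru_create_message_queue(&mqd, buf, 8 * GRU_CACHE_LINE_BYTES,
+ *                              0, 0, 0);      (nasid/vector/apicid unused)
+ */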
+
+/*
+ * Send a message to a message queue.
+ *
+ * Note: The message queue transport mechanism uses the first 32
+ * bits of the message. Users should avoid using these bits.
+ *
+ *
+ * Input:
+ * mqd pointer to message queue descriptor
+ * mesg pointer to message. Must be 64-bit aligned
+ * bytes size of message in bytes
+ *
+ * Output:
+ * 0 message sent
+ * >0 Send failure - see error codes below
+ *
+ */
+extern int gru_send_message_gpa(struct gru_message_queue_desc *mqd,
+ void *mesg, unsigned int bytes);
+
+/* Status values for gru_send_message() */
+#define MQE_OK 0 /* message sent successfully */
+#define MQE_CONGESTION 1 /* temporary congestion, try again */
+#define MQE_QUEUE_FULL 2 /* queue is full */
+#define MQE_UNEXPECTED_CB_ERR 3 /* unexpected CB error */
+#define MQE_PAGE_OVERFLOW 10 /* BUG - queue overflowed a page */
+#define MQE_BUG_NO_RESOURCES 11 /* BUG - could not alloc GRU cb/dsr */
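+
+/*
+ * Illustrative send loop (sketch, modeled on the quicktest1 self-test):
+ * retry while the queue is temporarily congested. "mqd" is assumed to
+ * have been initialized as sketched above.
+ *
+ *     char mesg[GRU_CACHE_LINE_BYTES];
+ *     int ret;
+ *
+ *     do {
+ *             ret = gru_send_message_gpa(&mqd, mesg, sizeof(mesg));
+ *     } while (ret == MQE_CONGESTION);
+ *     if (ret != MQE_OK)
+ *             ... queue full or unexpected CB error ...
+ */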
+
+/*
+ * Advance the receive pointer for the message queue to the next message.
+ * Note: current API requires messages to be gotten & freed in order. Future
+ * API extensions may allow for out-of-order freeing.
+ *
+ * Input
+ * mqd pointer to message queue descriptor
+ * mesq message being freed
+ */
+extern void gru_free_message(struct gru_message_queue_desc *mqd,
+ void *mesq);
+
+/*
+ * Get next message from message queue. Returns pointer to
+ * message OR NULL if no message present.
+ * User must call gru_free_message() after message is processed
+ * in order to move the queue pointers to next message.
+ *
+ * Input
+ * mqd pointer to message queue descriptor
+ *
+ * Output:
+ * p pointer to message
+ * NULL no message available
+ */
+extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
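+
+/*
+ * Illustrative receive loop (sketch): drain all currently available
+ * messages. Messages must be gotten & freed in order; process_message()
+ * is a hypothetical consumer.
+ *
+ *     void *m;
+ *
+ *     while ((m = gru_get_next_message(&mqd)) != NULL) {
+ *             process_message(m);
+ *             gru_free_message(&mqd, m);
+ *     }
+ */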
+
+
+/*
+ * Read a GRU global GPA. Source can be located in a remote partition.
+ *
+ * Input:
+ * value memory address where the value read is returned
+ * gpa source global physical address (memory or MMR)
+ *
+ * Output:
+ * 0 OK
+ * >0 error
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa);
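+
+/*
+ * Illustrative sketch: read one DW through the GRU. "gpa" is assumed
+ * to come from uv_gpa() or an MMR address; a nonzero return is a CB
+ * error.
+ *
+ *     unsigned long val;
+ *
+ *     if (gru_read_gpa(&val, gpa) != 0)
+ *             ... handle error ...
+ */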
+
+
+/*
+ * Copy data using the GRU. Source or destination can be located in a remote
+ * partition.
+ *
+ * Input:
+ * dest_gpa destination global physical address
+ * src_gpa source global physical address
+ * bytes number of bytes to copy
+ *
+ * Output:
+ * 0 OK
+ * >0 error
+ */
+extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
+ unsigned int bytes);
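+
+/*
+ * Illustrative sketch (modeled on the quicktest3 self-test): copy one
+ * local buffer to another via the GRU. uv_gpa() converts a kernel
+ * virtual address to a global physical address.
+ *
+ *     char src[256], dst[256];
+ *
+ *     if (gru_copy_gpa(uv_gpa(dst), uv_gpa(src), sizeof(src)) != 0)
+ *             ... handle CB error ...
+ */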
+
+/*
+ * Reserve GRU resources to be used asynchronously.
+ *
+ * input:
+ * blade_id - blade on which resources should be reserved
+ * cbrs - number of CBRs
+ * dsr_bytes - number of DSR bytes needed
+ * cmp - completion structure for waiting for
+ * async completions
+ * output:
+ * handle to identify resource
+ * (0 = no resources)
+ */
+extern unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
+ struct completion *cmp);
+
+/*
+ * Release async resources previously reserved.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+extern void gru_release_async_resources(unsigned long han);
+
+/*
+ * Wait for async GRU instructions to complete.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+extern void gru_wait_async_cbr(unsigned long han);
+
+/*
+ * Lock previous reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ * output:
+ * cb - pointer to first CBR
+ * dsr - pointer to first DSR
+ */
+extern void gru_lock_async_resource(unsigned long han, void **cb, void **dsr);
+
+/*
+ * Unlock previous reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ */
+extern void gru_unlock_async_resource(unsigned long han);
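+
+/*
+ * Illustrative async usage (sketch, modeled on the quicktest2 self-test;
+ * "blade_id" and the instructions issued are assumptions):
+ *
+ *     static DECLARE_COMPLETION(cmp);
+ *     unsigned long han;
+ *     void *cb;
+ *
+ *     han = gru_reserve_async_resources(blade_id, 1, 0, &cmp);
+ *     if (!han)
+ *             return -EBUSY;
+ *     gru_lock_async_resource(han, &cb, NULL);
+ *     ... issue GRU instructions on cb with IMA_INTERRUPT ...
+ *     gru_wait_async_cbr(han);
+ *     gru_unlock_async_resource(han);
+ *     gru_release_async_resources(han);
+ */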
+
+#endif /* __GRU_KSERVICES_H_ */
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
new file mode 100644
index 00000000..e77d1b1f
--- /dev/null
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __GRULIB_H__
+#define __GRULIB_H__
+
+#define GRU_BASENAME "gru"
+#define GRU_FULLNAME "/dev/gru"
+#define GRU_IOCTL_NUM 'G'
+
+/*
+ * Maximum number of GRU segments that a user can have open
+ * ZZZ temp - set high for testing. Revisit.
+ */
+#define GRU_MAX_OPEN_CONTEXTS 32
+
+/* Set Number of Request Blocks */
+#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *)
+
+/* Set Context Options */
+#define GRU_SET_CONTEXT_OPTION _IOWR(GRU_IOCTL_NUM, 4, void *)
+
+/* Fetch exception detail */
+#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *)
+
+/* For user call_os handling - normally a TLB fault */
+#define GRU_USER_CALL_OS _IOWR(GRU_IOCTL_NUM, 8, void *)
+
+/* For user unload context */
+#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *)
+
+/* For dumping GRU chiplet state */
+#define GRU_DUMP_CHIPLET_STATE _IOWR(GRU_IOCTL_NUM, 11, void *)
+
+/* For getting gseg statistics */
+#define GRU_GET_GSEG_STATISTICS _IOWR(GRU_IOCTL_NUM, 12, void *)
+
+/* For user TLB flushing (primarily for tests) */
+#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *)
+
+/* Get some config options (primarily for tests & emulator) */
+#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *)
+
+/* Various kernel self-tests */
+#define GRU_KTEST _IOWR(GRU_IOCTL_NUM, 52, void *)
+
+#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th))
+#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
+#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
+
+struct gru_get_gseg_statistics_req {
+ unsigned long gseg;
+ struct gru_gseg_statistics stats;
+};
+
+/*
+ * Structure used to pass context creation parameters to the driver
+ */
+struct gru_create_context_req {
+ unsigned long gseg;
+ unsigned int data_segment_bytes;
+ unsigned int control_blocks;
+ unsigned int maximum_thread_count;
+ unsigned int options;
+ unsigned char tlb_preload_count;
+};
+
+/*
+ * Structure used to pass unload context parameters to the driver
+ */
+struct gru_unload_context_req {
+ unsigned long gseg;
+};
+
+/*
+ * Structure used to set context options
+ */
+enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
+struct gru_set_context_option_req {
+ unsigned long gseg;
+ int op;
+ int val0;
+ long val1;
+};
+
+/*
+ * Structure used to pass TLB flush parameters to the driver
+ */
+struct gru_flush_tlb_req {
+ unsigned long gseg;
+ unsigned long vaddr;
+ size_t len;
+};
+
+/*
+ * Structure used to pass chiplet state dump parameters to the driver
+ */
+enum {dcs_pid, dcs_gid};
+struct gru_dump_chiplet_state_req {
+ unsigned int op;
+ unsigned int gid;
+ int ctxnum;
+ char data_opt;
+ char lock_cch;
+ char flush_cbrs;
+ char fill[10];
+ pid_t pid;
+ void *buf;
+ size_t buflen;
+ /* ---- output --- */
+ unsigned int num_contexts;
+};
+
+#define GRU_DUMP_MAGIC 0x3474ab6c
+struct gru_dump_context_header {
+ unsigned int magic;
+ unsigned int gid;
+ unsigned char ctxnum;
+ unsigned char cbrcnt;
+ unsigned char dsrcnt;
+ pid_t pid;
+ unsigned long vaddr;
+ int cch_locked;
+ unsigned long data[0];
+};
+
+/*
+ * GRU configuration info (temp - for testing)
+ */
+struct gru_config_info {
+ int cpus;
+ int blades;
+ int nodes;
+ int chiplets;
+ int fill[16];
+};
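+
+/*
+ * Illustrative user-space sketch (assumptions: /dev/gru exists and the
+ * caller has permission; error handling omitted):
+ *
+ *     struct gru_config_info info;
+ *     int fd = open(GRU_FULLNAME, O_RDWR);
+ *
+ *     ioctl(fd, GRU_GET_CONFIG_INFO, &info);
+ *     printf("cpus %d, blades %d\n", info.cpus, info.blades);
+ */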
+
+#endif /* __GRULIB_H__ */
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
new file mode 100644
index 00000000..ae16c8cb
--- /dev/null
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -0,0 +1,973 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/prefetch.h>
+#include <asm/uv/uv_hub.h>
+#include "gru.h"
+#include "grutables.h"
+#include "gruhandles.h"
+
+unsigned long gru_options __read_mostly;
+
+static struct device_driver gru_driver = {
+ .name = "gru"
+};
+
+static struct device gru_device = {
+ .init_name = "",
+ .driver = &gru_driver,
+};
+
+struct device *grudev = &gru_device;
+
+/*
+ * Select a gru fault map to be used by the current cpu. Note that
+ * multiple cpus may be using the same map.
+ * ZZZ should be inline but did not work on emulator
+ */
+int gru_cpu_fault_map_id(void)
+{
+#ifdef CONFIG_IA64
+ return uv_blade_processor_id() % GRU_NUM_TFM;
+#else
+ int cpu = smp_processor_id();
+ int id, core;
+
+ core = uv_cpu_core_number(cpu);
+ id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ return id;
+#endif
+}
+
+/*--------- ASID Management -------------------------------------------
+ *
+ * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
+ * Once MAX is reached, flush the TLB & start over. However,
+ * some asids may still be in use. There won't be many (percentage wise) still
+ * in use. Search active contexts & determine the value of the first
+ * asid in use ("x"s below). Set "limit" to this value.
+ * This defines a block of assignable asids.
+ *
+ * When "limit" is reached, search forward from limit+1 and determine the
+ * next block of assignable asids.
+ *
+ * Repeat until MAX_ASID is reached, then start over again.
+ *
+ * Each time MAX_ASID is reached, increment the asid generation. Since
+ * the search for in-use asids only checks contexts with GRUs currently
+ * assigned, asids in some contexts will be missed. Prior to loading
+ * a context, the asid generation of the GTS asid is rechecked. If it
+ * doesn't match the current generation, a new asid will be assigned.
+ *
+ * 0---------------x------------x---------------------x----|
+ * ^-next ^-limit ^-MAX_ASID
+ *
+ * All asid manipulation & context loading/unloading is protected by the
+ * gs_lock.
+ */
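+
+/*
+ * Illustrative example (hypothetical numbers): after a wrap, if active
+ * contexts still hold asids 0x100 and 0x800, the first scan sets
+ * limit = 0x100 so MIN_ASID..0xff are assignable. When that limit is
+ * reached, the next scan starts above 0x100 and finds limit = 0x800,
+ * and so on until MAX_ASID forces another wrap.
+ */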
+
+/* Hit the asid limit. Start over */
+static int gru_wrap_asid(struct gru_state *gru)
+{
+ gru_dbg(grudev, "gid %d\n", gru->gs_gid);
+ STAT(asid_wrap);
+ gru->gs_asid_gen++;
+ return MIN_ASID;
+}
+
+/* Find the next chunk of unused asids */
+static int gru_reset_asid_limit(struct gru_state *gru, int asid)
+{
+ int i, gid, inuse_asid, limit;
+
+ gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
+ STAT(asid_next);
+ limit = MAX_ASID;
+ if (asid >= limit)
+ asid = gru_wrap_asid(gru);
+ gru_flush_all_tlb(gru);
+ gid = gru->gs_gid;
+again:
+ for (i = 0; i < GRU_NUM_CCH; i++) {
+ if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
+ continue;
+ inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
+ gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
+ inuse_asid, i);
+ if (inuse_asid == asid) {
+ asid += ASID_INC;
+ if (asid >= limit) {
+ /*
+ * empty range: reset the range limit and
+ * start over
+ */
+ limit = MAX_ASID;
+ if (asid >= MAX_ASID)
+ asid = gru_wrap_asid(gru);
+ goto again;
+ }
+ }
+
+ if ((inuse_asid > asid) && (inuse_asid < limit))
+ limit = inuse_asid;
+ }
+ gru->gs_asid_limit = limit;
+ gru->gs_asid = asid;
+ gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
+ asid, limit);
+ return asid;
+}
+
+/* Assign a new ASID to a thread context. */
+static int gru_assign_asid(struct gru_state *gru)
+{
+ int asid;
+
+ gru->gs_asid += ASID_INC;
+ asid = gru->gs_asid;
+ if (asid >= gru->gs_asid_limit)
+ asid = gru_reset_asid_limit(gru, asid);
+
+ gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
+ return asid;
+}
+
+/*
+ * Clear n bits in a word. Return a word indicating the bits that were cleared.
+ * Optionally, build an array of chars that contain the bit numbers allocated.
+ */
+static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
+ char *idx)
+{
+ unsigned long bits = 0;
+ int i;
+
+ while (n--) {
+ i = find_first_bit(p, mmax);
+ if (i == mmax)
+ BUG();
+ __clear_bit(i, p);
+ __set_bit(i, &bits);
+ if (idx)
+ *idx++ = i;
+ }
+ return bits;
+}
+
+unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
+ char *cbmap)
+{
+ return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
+ cbmap);
+}
+
+unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
+ char *dsmap)
+{
+ return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
+ dsmap);
+}
+
+static void reserve_gru_resources(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ gru->gs_active_contexts++;
+ gts->ts_cbr_map =
+ gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
+ gts->ts_cbr_idx);
+ gts->ts_dsr_map =
+ gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
+}
+
+static void free_gru_resources(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ gru->gs_active_contexts--;
+ gru->gs_cbr_map |= gts->ts_cbr_map;
+ gru->gs_dsr_map |= gts->ts_dsr_map;
+}
+
+/*
+ * Check if a GRU has sufficient free resources to satisfy an allocation
+ * request. Note: GRU locks may or may not be held when this is called. If
+ * not held, recheck after acquiring the appropriate locks.
+ *
+ * Returns 1 if sufficient resources, 0 if not
+ */
+static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
+ int dsr_au_count, int max_active_contexts)
+{
+ return hweight64(gru->gs_cbr_map) >= cbr_au_count
+ && hweight64(gru->gs_dsr_map) >= dsr_au_count
+ && gru->gs_active_contexts < max_active_contexts;
+}
+
+/*
+ * TLB management requires tracking all GRU chiplets that have loaded a GSEG
+ * context.
+ */
+static int gru_load_mm_tracker(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ struct gru_mm_struct *gms = gts->ts_gms;
+ struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
+ unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
+ int asid;
+
+ spin_lock(&gms->ms_asid_lock);
+ asid = asids->mt_asid;
+
+ spin_lock(&gru->gs_asid_lock);
+ if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
+ gru->gs_asid_gen)) {
+ asid = gru_assign_asid(gru);
+ asids->mt_asid = asid;
+ asids->mt_asid_gen = gru->gs_asid_gen;
+ STAT(asid_new);
+ } else {
+ STAT(asid_reuse);
+ }
+ spin_unlock(&gru->gs_asid_lock);
+
+ BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
+ asids->mt_ctxbitmap |= ctxbitmap;
+ if (!test_bit(gru->gs_gid, gms->ms_asidmap))
+ __set_bit(gru->gs_gid, gms->ms_asidmap);
+ spin_unlock(&gms->ms_asid_lock);
+
+ gru_dbg(grudev,
+ "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
+ gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
+ gms->ms_asidmap[0]);
+ return asid;
+}
+
+static void gru_unload_mm_tracker(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ struct gru_mm_struct *gms = gts->ts_gms;
+ struct gru_mm_tracker *asids;
+ unsigned short ctxbitmap;
+
+ asids = &gms->ms_asids[gru->gs_gid];
+ ctxbitmap = (1 << gts->ts_ctxnum);
+ spin_lock(&gms->ms_asid_lock);
+ spin_lock(&gru->gs_asid_lock);
+ BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
+ asids->mt_ctxbitmap ^= ctxbitmap;
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
+ spin_unlock(&gru->gs_asid_lock);
+ spin_unlock(&gms->ms_asid_lock);
+}
+
+/*
+ * Decrement the reference count on a GTS structure. Free the structure
+ * if the reference count goes to zero.
+ */
+void gts_drop(struct gru_thread_state *gts)
+{
+ if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
+ if (gts->ts_gms)
+ gru_drop_mmu_notifier(gts->ts_gms);
+ kfree(gts);
+ STAT(gts_free);
+ }
+}
+
+/*
+ * Locate the GTS structure for the current thread.
+ */
+static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
+ *vdata, int tsid)
+{
+ struct gru_thread_state *gts;
+
+ list_for_each_entry(gts, &vdata->vd_head, ts_next)
+ if (gts->ts_tsid == tsid)
+ return gts;
+ return NULL;
+}
+
+/*
+ * Allocate a thread state structure.
+ */
+struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid)
+{
+ struct gru_thread_state *gts;
+ struct gru_mm_struct *gms;
+ int bytes;
+
+ bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
+ bytes += sizeof(struct gru_thread_state);
+ gts = kmalloc(bytes, GFP_KERNEL);
+ if (!gts)
+ return ERR_PTR(-ENOMEM);
+
+ STAT(gts_alloc);
+ memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
+ atomic_set(&gts->ts_refcnt, 1);
+ mutex_init(&gts->ts_ctxlock);
+ gts->ts_cbr_au_count = cbr_au_count;
+ gts->ts_dsr_au_count = dsr_au_count;
+ gts->ts_tlb_preload_count = tlb_preload_count;
+ gts->ts_user_options = options;
+ gts->ts_user_blade_id = -1;
+ gts->ts_user_chiplet_id = -1;
+ gts->ts_tsid = tsid;
+ gts->ts_ctxnum = NULLCTX;
+ gts->ts_tlb_int_select = -1;
+ gts->ts_cch_req_slice = -1;
+ gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
+ if (vma) {
+ gts->ts_mm = current->mm;
+ gts->ts_vma = vma;
+ gms = gru_register_mmu_notifier();
+ if (IS_ERR(gms))
+ goto err;
+ gts->ts_gms = gms;
+ }
+
+ gru_dbg(grudev, "alloc gts %p\n", gts);
+ return gts;
+
+err:
+ gts_drop(gts);
+ return ERR_CAST(gms);
+}
+
+/*
+ * Allocate a vma private data structure.
+ */
+struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
+{
+ struct gru_vma_data *vdata = NULL;
+
+ vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
+ if (!vdata)
+ return NULL;
+
+ STAT(vdata_alloc);
+ INIT_LIST_HEAD(&vdata->vd_head);
+ spin_lock_init(&vdata->vd_lock);
+ gru_dbg(grudev, "alloc vdata %p\n", vdata);
+ return vdata;
+}
+
+/*
+ * Find the thread state structure for the current thread.
+ */
+struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
+ int tsid)
+{
+ struct gru_vma_data *vdata = vma->vm_private_data;
+ struct gru_thread_state *gts;
+
+ spin_lock(&vdata->vd_lock);
+ gts = gru_find_current_gts_nolock(vdata, tsid);
+ spin_unlock(&vdata->vd_lock);
+ gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
+ return gts;
+}
+
+/*
+ * Allocate a new thread state for a GSEG. Note that races may allow
+ * another thread to create a gts first.
+ */
+struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
+ int tsid)
+{
+ struct gru_vma_data *vdata = vma->vm_private_data;
+ struct gru_thread_state *gts, *ngts;
+
+ gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
+ vdata->vd_dsr_au_count,
+ vdata->vd_tlb_preload_count,
+ vdata->vd_user_options, tsid);
+ if (IS_ERR(gts))
+ return gts;
+
+ spin_lock(&vdata->vd_lock);
+ ngts = gru_find_current_gts_nolock(vdata, tsid);
+ if (ngts) {
+ gts_drop(gts);
+ gts = ngts;
+ STAT(gts_double_allocate);
+ } else {
+ list_add(&gts->ts_next, &vdata->vd_head);
+ }
+ spin_unlock(&vdata->vd_lock);
+ gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
+ return gts;
+}
+
+/*
+ * Free the GRU context assigned to the thread state.
+ */
+static void gru_free_gru_context(struct gru_thread_state *gts)
+{
+ struct gru_state *gru;
+
+ gru = gts->ts_gru;
+ gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
+
+ spin_lock(&gru->gs_lock);
+ gru->gs_gts[gts->ts_ctxnum] = NULL;
+ free_gru_resources(gru, gts);
+ BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
+ __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
+ gts->ts_ctxnum = NULLCTX;
+ gts->ts_gru = NULL;
+ gts->ts_blade = -1;
+ spin_unlock(&gru->gs_lock);
+
+ gts_drop(gts);
+ STAT(free_context);
+}
+
+/*
+ * Prefetching cachelines helps hardware performance.
+ * (Strictly a performance enhancement. Not functionally required).
+ */
+static void prefetch_data(void *p, int num, int stride)
+{
+ while (num-- > 0) {
+ prefetchw(p);
+ p += stride;
+ }
+}
+
+static inline long gru_copy_handle(void *d, void *s)
+{
+ memcpy(d, s, GRU_HANDLE_BYTES);
+ return GRU_HANDLE_BYTES;
+}
+
+static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
+ unsigned long cbrmap, unsigned long length)
+{
+ int i, scr;
+
+ prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
+ GRU_CACHE_LINE_BYTES);
+
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
+ prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
+ prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
+ GRU_CACHE_LINE_BYTES);
+ cb += GRU_HANDLE_STRIDE;
+ }
+}
+
+static void gru_load_context_data(void *save, void *grubase, int ctxnum,
+ unsigned long cbrmap, unsigned long dsrmap,
+ int data_valid)
+{
+ void *gseg, *cb, *cbe;
+ unsigned long length;
+ int i, scr;
+
+ gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
+ cb = gseg + GRU_CB_BASE;
+ cbe = grubase + GRU_CBE_BASE;
+ length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
+ gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
+
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
+ if (data_valid) {
+ save += gru_copy_handle(cb, save);
+ save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
+ save);
+ } else {
+ memset(cb, 0, GRU_CACHE_LINE_BYTES);
+ memset(cbe + i * GRU_HANDLE_STRIDE, 0,
+ GRU_CACHE_LINE_BYTES);
+ }
+ /* Flush CBE to hide race in context restart */
+ mb();
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
+ cb += GRU_HANDLE_STRIDE;
+ }
+
+ if (data_valid)
+ memcpy(gseg + GRU_DS_BASE, save, length);
+ else
+ memset(gseg + GRU_DS_BASE, 0, length);
+}
+
+static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
+ unsigned long cbrmap, unsigned long dsrmap)
+{
+ void *gseg, *cb, *cbe;
+ unsigned long length;
+ int i, scr;
+
+ gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
+ cb = gseg + GRU_CB_BASE;
+ cbe = grubase + GRU_CBE_BASE;
+ length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
+
+ /* CBEs may not be coherent. Flush them from cache */
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr)
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
+ mb(); /* Let the CL flush complete */
+
+ gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
+
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
+ save += gru_copy_handle(save, cb);
+ save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
+ cb += GRU_HANDLE_STRIDE;
+ }
+ memcpy(save, gseg + GRU_DS_BASE, length);
+}
+
+void gru_unload_context(struct gru_thread_state *gts, int savestate)
+{
+ struct gru_state *gru = gts->ts_gru;
+ struct gru_context_configuration_handle *cch;
+ int ctxnum = gts->ts_ctxnum;
+
+ if (!is_kernel_context(gts))
+ zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
+ cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
+
+ gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
+ gts, gts->ts_cbr_map, gts->ts_dsr_map);
+ lock_cch_handle(cch);
+ if (cch_interrupt_sync(cch))
+ BUG();
+
+ if (!is_kernel_context(gts))
+ gru_unload_mm_tracker(gru, gts);
+ if (savestate) {
+ gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
+ ctxnum, gts->ts_cbr_map,
+ gts->ts_dsr_map);
+ gts->ts_data_valid = 1;
+ }
+
+ if (cch_deallocate(cch))
+ BUG();
+ unlock_cch_handle(cch);
+
+ gru_free_gru_context(gts);
+}
+
+/*
+ * Load a GRU context by copying it from the thread data structure in memory
+ * to the GRU.
+ */
+void gru_load_context(struct gru_thread_state *gts)
+{
+ struct gru_state *gru = gts->ts_gru;
+ struct gru_context_configuration_handle *cch;
+ int i, err, asid, ctxnum = gts->ts_ctxnum;
+
+ cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
+ lock_cch_handle(cch);
+ cch->tfm_fault_bit_enable =
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+ || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
+ cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
+ if (cch->tlb_int_enable) {
+ gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+ cch->tlb_int_select = gts->ts_tlb_int_select;
+ }
+ if (gts->ts_cch_req_slice >= 0) {
+ cch->req_slice_set_enable = 1;
+ cch->req_slice = gts->ts_cch_req_slice;
+ } else {
+ cch->req_slice_set_enable = 0;
+ }
+ cch->tfm_done_bit_enable = 0;
+ cch->dsr_allocation_map = gts->ts_dsr_map;
+ cch->cbr_allocation_map = gts->ts_cbr_map;
+
+ if (is_kernel_context(gts)) {
+ cch->unmap_enable = 1;
+ cch->tfm_done_bit_enable = 1;
+ cch->cb_int_enable = 1;
+ cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
+ } else {
+ cch->unmap_enable = 0;
+ cch->tfm_done_bit_enable = 0;
+ cch->cb_int_enable = 0;
+ asid = gru_load_mm_tracker(gru, gts);
+ for (i = 0; i < 8; i++) {
+ cch->asid[i] = asid + i;
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ }
+ }
+
+ err = cch_allocate(cch);
+ if (err) {
+ gru_dbg(grudev,
+ "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
+ err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
+ BUG();
+ }
+
+ gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
+ gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
+
+ if (cch_start(cch))
+ BUG();
+ unlock_cch_handle(cch);
+
+ gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
+ gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
+}
+
+/*
+ * Update fields in an active CCH:
+ * - retarget interrupts on local blade
+ * - update sizeavail mask
+ */
+int gru_update_cch(struct gru_thread_state *gts)
+{
+ struct gru_context_configuration_handle *cch;
+ struct gru_state *gru = gts->ts_gru;
+ int i, ctxnum = gts->ts_ctxnum, ret = 0;
+
+ cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
+
+ lock_cch_handle(cch);
+ if (cch->state == CCHSTATE_ACTIVE) {
+ if (gru->gs_gts[gts->ts_ctxnum] != gts)
+ goto exit;
+ if (cch_interrupt(cch))
+ BUG();
+ for (i = 0; i < 8; i++)
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+ cch->tlb_int_select = gru_cpu_fault_map_id();
+ cch->tfm_fault_bit_enable =
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+ || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
+ if (cch_start(cch))
+ BUG();
+ ret = 1;
+ }
+exit:
+ unlock_cch_handle(cch);
+ return ret;
+}
+
+/*
+ * Update CCH tlb interrupt select. Required when all the following is true:
+ * - task's GRU context is loaded into a GRU
+ * - task is using interrupt notification for TLB faults
+ * - task has migrated to a different cpu on the same blade where
+ * it was previously running.
+ */
+static int gru_retarget_intr(struct gru_thread_state *gts)
+{
+ if (gts->ts_tlb_int_select < 0
+ || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
+ return 0;
+
+ gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
+ gru_cpu_fault_map_id());
+ return gru_update_cch(gts);
+}
+
+/*
+ * Check if a GRU context is allowed to use a specific chiplet. By default
+ * a context is assigned to any blade-local chiplet. However, users can
+ * override this.
+ * Returns 1 if assignment allowed, 0 otherwise
+ */
+static int gru_check_chiplet_assignment(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ int blade_id;
+ int chiplet_id;
+
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
+
+ chiplet_id = gts->ts_user_chiplet_id;
+ return gru->gs_blade_id == blade_id &&
+ (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
+}
+
+/*
+ * Unload the gru context if it is not assigned to the correct blade or
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ */
+void gru_check_context_placement(struct gru_thread_state *gts)
+{
+ struct gru_state *gru;
+
+ /*
+ * If the current task is the context owner, verify that the
+ * context is correctly placed. This test is skipped for non-owner
+ * references. Pthread apps use non-owner references to the CBRs.
+ */
+ gru = gts->ts_gru;
+ if (!gru || gts->ts_tgid_owner != current->tgid)
+ return;
+
+ if (!gru_check_chiplet_assignment(gru, gts)) {
+ STAT(check_context_unload);
+ gru_unload_context(gts, 1);
+ } else if (gru_retarget_intr(gts)) {
+ STAT(check_context_retarget_intr);
+ }
+}
+
+
+/*
+ * Insufficient GRU resources available on the local blade. Steal a context from
+ * a process. This is a hack until a _real_ resource scheduler is written....
+ */
+#define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
+#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
+ ((g)+1) : &(b)->bs_grus[0])
+
+static int is_gts_stealable(struct gru_thread_state *gts,
+ struct gru_blade_state *bs)
+{
+ if (is_kernel_context(gts))
+ return down_write_trylock(&bs->bs_kgts_sema);
+ else
+ return mutex_trylock(&gts->ts_ctxlock);
+}
+
+static void gts_stolen(struct gru_thread_state *gts,
+ struct gru_blade_state *bs)
+{
+ if (is_kernel_context(gts)) {
+ up_write(&bs->bs_kgts_sema);
+ STAT(steal_kernel_context);
+ } else {
+ mutex_unlock(&gts->ts_ctxlock);
+ STAT(steal_user_context);
+ }
+}
+
+void gru_steal_context(struct gru_thread_state *gts)
+{
+ struct gru_blade_state *blade;
+ struct gru_state *gru, *gru0;
+ struct gru_thread_state *ngts = NULL;
+ int ctxnum, ctxnum0, flag = 0, cbr, dsr;
+ int blade_id;
+
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
+ cbr = gts->ts_cbr_au_count;
+ dsr = gts->ts_dsr_au_count;
+
+ blade = gru_base[blade_id];
+ spin_lock(&blade->bs_lock);
+
+ ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
+ gru = blade->bs_lru_gru;
+ if (ctxnum == 0)
+ gru = next_gru(blade, gru);
+ blade->bs_lru_gru = gru;
+ blade->bs_lru_ctxnum = ctxnum;
+ ctxnum0 = ctxnum;
+ gru0 = gru;
+ while (1) {
+ if (gru_check_chiplet_assignment(gru, gts)) {
+ if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
+ break;
+ spin_lock(&gru->gs_lock);
+ for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ if (flag && gru == gru0 && ctxnum == ctxnum0)
+ break;
+ ngts = gru->gs_gts[ctxnum];
+ /*
+ * We are grabbing locks out of order, so trylock is
+ * needed. GTSs are usually not locked, so the odds of
+ * success are high. If trylock fails, try to steal a
+ * different GSEG.
+ */
+ if (ngts && is_gts_stealable(ngts, blade))
+ break;
+ ngts = NULL;
+ }
+ spin_unlock(&gru->gs_lock);
+ if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
+ break;
+ }
+ if (flag && gru == gru0)
+ break;
+ flag = 1;
+ ctxnum = 0;
+ gru = next_gru(blade, gru);
+ }
+ spin_unlock(&blade->bs_lock);
+
+ if (ngts) {
+ gts->ustats.context_stolen++;
+ ngts->ts_steal_jiffies = jiffies;
+ gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
+ gts_stolen(ngts, blade);
+ } else {
+ STAT(steal_context_failed);
+ }
+ gru_dbg(grudev,
+ "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
+ " avail cb %ld, ds %ld\n",
+ gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
+ hweight64(gru->gs_dsr_map));
+}
+
+/*
+ * Assign a gru context.
+ */
+static int gru_assign_context_number(struct gru_state *gru)
+{
+ int ctxnum;
+
+ ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
+ __set_bit(ctxnum, &gru->gs_context_map);
+ return ctxnum;
+}
+
+/*
+ * Scan the GRUs on the local blade & assign a GRU context.
+ */
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
+{
+ struct gru_state *gru, *grux;
+ int i, max_active_contexts;
+ int blade_id = gts->ts_user_blade_id;
+
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
+again:
+ gru = NULL;
+ max_active_contexts = GRU_NUM_CCH;
+ for_each_gru_on_blade(grux, blade_id, i) {
+ if (!gru_check_chiplet_assignment(grux, gts))
+ continue;
+ if (check_gru_resources(grux, gts->ts_cbr_au_count,
+ gts->ts_dsr_au_count,
+ max_active_contexts)) {
+ gru = grux;
+ max_active_contexts = grux->gs_active_contexts;
+ if (max_active_contexts == 0)
+ break;
+ }
+ }
+
+ if (gru) {
+ spin_lock(&gru->gs_lock);
+ if (!check_gru_resources(gru, gts->ts_cbr_au_count,
+ gts->ts_dsr_au_count, GRU_NUM_CCH)) {
+ spin_unlock(&gru->gs_lock);
+ goto again;
+ }
+ reserve_gru_resources(gru, gts);
+ gts->ts_gru = gru;
+ gts->ts_blade = gru->gs_blade_id;
+ gts->ts_ctxnum = gru_assign_context_number(gru);
+ atomic_inc(&gts->ts_refcnt);
+ gru->gs_gts[gts->ts_ctxnum] = gts;
+ spin_unlock(&gru->gs_lock);
+
+ STAT(assign_context);
+ gru_dbg(grudev,
+ "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
+ gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
+ gts->ts_gru->gs_gid, gts->ts_ctxnum,
+ gts->ts_cbr_au_count, gts->ts_dsr_au_count);
+ } else {
+ gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
+ STAT(assign_context_failed);
+ }
+
+ return gru;
+}
+
+/*
+ * gru_fault
+ *
+ * Map the user's GRU segment
+ *
+ * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
+ */
+int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct gru_thread_state *gts;
+ unsigned long paddr, vaddr;
+
+ vaddr = (unsigned long)vmf->virtual_address;
+ gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
+ vma, vaddr, GSEG_BASE(vaddr));
+ STAT(nopfn);
+
+ /* The following check ensures vaddr is a valid address in the VMA */
+ gts = gru_find_thread_state(vma, TSID(vaddr, vma));
+ if (!gts)
+ return VM_FAULT_SIGBUS;
+
+again:
+ mutex_lock(&gts->ts_ctxlock);
+ preempt_disable();
+
+ gru_check_context_placement(gts);
+
+ if (!gts->ts_gru) {
+ STAT(load_user_context);
+ if (!gru_assign_gru_context(gts)) {
+ preempt_enable();
+ mutex_unlock(&gts->ts_ctxlock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
+ if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
+ gru_steal_context(gts);
+ goto again;
+ }
+ gru_load_context(gts);
+ paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
+ remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
+ paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
+ vma->vm_page_prot);
+ }
+
+ preempt_enable();
+ mutex_unlock(&gts->ts_ctxlock);
+
+ return VM_FAULT_NOPAGE;
+}
+
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
new file mode 100644
index 00000000..950dbe9e
--- /dev/null
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -0,0 +1,381 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * PROC INTERFACES
+ *
+ * This file supports the /proc interfaces for the GRU driver
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "gru.h"
+#include "grulib.h"
+#include "grutables.h"
+
+#define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
+
+static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
+{
+ unsigned long val = atomic_long_read(v);
+
+ seq_printf(s, "%16lu %s\n", val, id);
+}
+
+static int statistics_show(struct seq_file *s, void *p)
+{
+ printstat(s, vdata_alloc);
+ printstat(s, vdata_free);
+ printstat(s, gts_alloc);
+ printstat(s, gts_free);
+ printstat(s, gms_alloc);
+ printstat(s, gms_free);
+ printstat(s, gts_double_allocate);
+ printstat(s, assign_context);
+ printstat(s, assign_context_failed);
+ printstat(s, free_context);
+ printstat(s, load_user_context);
+ printstat(s, load_kernel_context);
+ printstat(s, lock_kernel_context);
+ printstat(s, unlock_kernel_context);
+ printstat(s, steal_user_context);
+ printstat(s, steal_kernel_context);
+ printstat(s, steal_context_failed);
+ printstat(s, nopfn);
+ printstat(s, asid_new);
+ printstat(s, asid_next);
+ printstat(s, asid_wrap);
+ printstat(s, asid_reuse);
+ printstat(s, intr);
+ printstat(s, intr_cbr);
+ printstat(s, intr_tfh);
+ printstat(s, intr_spurious);
+ printstat(s, intr_mm_lock_failed);
+ printstat(s, call_os);
+ printstat(s, call_os_wait_queue);
+ printstat(s, user_flush_tlb);
+ printstat(s, user_unload_context);
+ printstat(s, user_exception);
+ printstat(s, set_context_option);
+ printstat(s, check_context_retarget_intr);
+ printstat(s, check_context_unload);
+ printstat(s, tlb_dropin);
+ printstat(s, tlb_preload_page);
+ printstat(s, tlb_dropin_fail_no_asid);
+ printstat(s, tlb_dropin_fail_upm);
+ printstat(s, tlb_dropin_fail_invalid);
+ printstat(s, tlb_dropin_fail_range_active);
+ printstat(s, tlb_dropin_fail_idle);
+ printstat(s, tlb_dropin_fail_fmm);
+ printstat(s, tlb_dropin_fail_no_exception);
+ printstat(s, tfh_stale_on_fault);
+ printstat(s, mmu_invalidate_range);
+ printstat(s, mmu_invalidate_page);
+ printstat(s, flush_tlb);
+ printstat(s, flush_tlb_gru);
+ printstat(s, flush_tlb_gru_tgh);
+ printstat(s, flush_tlb_gru_zero_asid);
+ printstat(s, copy_gpa);
+ printstat(s, read_gpa);
+ printstat(s, mesq_receive);
+ printstat(s, mesq_receive_none);
+ printstat(s, mesq_send);
+ printstat(s, mesq_send_failed);
+ printstat(s, mesq_noop);
+ printstat(s, mesq_send_unexpected_error);
+ printstat(s, mesq_send_lb_overflow);
+ printstat(s, mesq_send_qlimit_reached);
+ printstat(s, mesq_send_amo_nacked);
+ printstat(s, mesq_send_put_nacked);
+ printstat(s, mesq_qf_locked);
+ printstat(s, mesq_qf_noop_not_full);
+ printstat(s, mesq_qf_switch_head_failed);
+ printstat(s, mesq_qf_unexpected_error);
+ printstat(s, mesq_noop_unexpected_error);
+ printstat(s, mesq_noop_lb_overflow);
+ printstat(s, mesq_noop_qlimit_reached);
+ printstat(s, mesq_noop_amo_nacked);
+ printstat(s, mesq_noop_put_nacked);
+ printstat(s, mesq_noop_page_overflow);
+ return 0;
+}
+
+static ssize_t statistics_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *data)
+{
+ memset(&gru_stats, 0, sizeof(gru_stats));
+ return count;
+}
+
+static int mcs_statistics_show(struct seq_file *s, void *p)
+{
+ int op;
+ unsigned long total, count, max;
+ static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
+ "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
+ "tfh_write_restart", "tgh_invalidate"};
+
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ for (op = 0; op < mcsop_last; op++) {
+ count = atomic_long_read(&mcs_op_statistics[op].count);
+ total = atomic_long_read(&mcs_op_statistics[op].total);
+ max = mcs_op_statistics[op].max;
+ seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+ count ? total / count : 0, max);
+ }
+ return 0;
+}
+
+static ssize_t mcs_statistics_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *data)
+{
+ memset(mcs_op_statistics, 0, sizeof(mcs_op_statistics));
+ return count;
+}
+
+static int options_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
+ seq_printf(s, "0x%lx\n", gru_options);
+ return 0;
+}
+
+static ssize_t options_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *data)
+{
+ char buf[20];
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ if (strict_strtoul(buf, 0, &gru_options))
+ return -EINVAL;
+
+ return count;
+}
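+
+/*
+ * Illustrative usage (not part of the driver): once gru_proc_init() has
+ * created the files under /proc/sgi_uv/gru, this interface can be driven
+ * from a shell, e.g.:
+ *
+ *	cat /proc/sgi_uv/gru/statistics            # dump the counters
+ *	echo 1 > /proc/sgi_uv/gru/statistics       # any write resets them
+ *	echo 0x3 > /proc/sgi_uv/gru/debug_options  # enable trace + statistics
+ */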
+
+static int cch_seq_show(struct seq_file *file, void *data)
+{
+ long gid = *(long *)data;
+ int i;
+ struct gru_state *gru = GID_TO_GRU(gid);
+ struct gru_thread_state *ts;
+ const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
+
+ if (gid == 0)
+ seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
+ "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
+ if (gru)
+ for (i = 0; i < GRU_NUM_CCH; i++) {
+ ts = gru->gs_gts[i];
+ if (!ts)
+ continue;
+ seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
+ gru->gs_gid, gru->gs_blade_id, i,
+ is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
+ is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
+ ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
+ ts->ts_dsr_au_count * GRU_DSR_AU_BYTES,
+ mode[ts->ts_user_options &
+ GRU_OPT_MISS_MASK]);
+ }
+
+ return 0;
+}
+
+static int gru_seq_show(struct seq_file *file, void *data)
+{
+ long gid = *(long *)data, ctxfree, cbrfree, dsrfree;
+ struct gru_state *gru = GID_TO_GRU(gid);
+
+ if (gid == 0) {
+ seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
+ "ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
+ seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
+ "busy", "busy", "free", "free", "free");
+ }
+ if (gru) {
+ ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
+ cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
+ dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
+ seq_printf(file, " %5d%5d%7ld%6ld%6ld%8ld%6ld%6ld\n",
+ gru->gs_gid, gru->gs_blade_id, GRU_NUM_CCH - ctxfree,
+ GRU_NUM_CBE - cbrfree, GRU_NUM_DSR_BYTES - dsrfree,
+ ctxfree, cbrfree, dsrfree);
+ }
+
+ return 0;
+}
+
+static void seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static void *seq_start(struct seq_file *file, loff_t *gid)
+{
+ if (*gid < gru_max_gids)
+ return gid;
+ return NULL;
+}
+
+static void *seq_next(struct seq_file *file, void *data, loff_t *gid)
+{
+ (*gid)++;
+ if (*gid < gru_max_gids)
+ return gid;
+ return NULL;
+}
+
+static const struct seq_operations cch_seq_ops = {
+ .start = seq_start,
+ .next = seq_next,
+ .stop = seq_stop,
+ .show = cch_seq_show
+};
+
+static const struct seq_operations gru_seq_ops = {
+ .start = seq_start,
+ .next = seq_next,
+ .stop = seq_stop,
+ .show = gru_seq_show
+};
+
+static int statistics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, statistics_show, NULL);
+}
+
+static int mcs_statistics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mcs_statistics_show, NULL);
+}
+
+static int options_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, options_show, NULL);
+}
+
+static int cch_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cch_seq_ops);
+}
+
+static int gru_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &gru_seq_ops);
+}
+
+/* *INDENT-OFF* */
+static const struct file_operations statistics_fops = {
+ .open = statistics_open,
+ .read = seq_read,
+ .write = statistics_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations mcs_statistics_fops = {
+ .open = mcs_statistics_open,
+ .read = seq_read,
+ .write = mcs_statistics_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations options_fops = {
+ .open = options_open,
+ .read = seq_read,
+ .write = options_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations cch_fops = {
+ .open = cch_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+static const struct file_operations gru_fops = {
+ .open = gru_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct proc_entry {
+ char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+ struct proc_dir_entry *entry;
+} proc_files[] = {
+ {"statistics", 0644, &statistics_fops},
+ {"mcs_statistics", 0644, &mcs_statistics_fops},
+ {"debug_options", 0644, &options_fops},
+ {"cch_status", 0444, &cch_fops},
+ {"gru_status", 0444, &gru_fops},
+ {NULL}
+};
+/* *INDENT-ON* */
+
+static struct proc_dir_entry *proc_gru __read_mostly;
+
+static int create_proc_file(struct proc_entry *p)
+{
+ p->entry = proc_create(p->name, p->mode, proc_gru, p->fops);
+ if (!p->entry)
+ return -1;
+ return 0;
+}
+
+static void delete_proc_files(void)
+{
+ struct proc_entry *p;
+
+ if (proc_gru) {
+ for (p = proc_files; p->name; p++)
+ if (p->entry)
+ remove_proc_entry(p->name, proc_gru);
+ remove_proc_entry("gru", proc_gru->parent);
+ }
+}
+
+int gru_proc_init(void)
+{
+ struct proc_entry *p;
+
+ proc_gru = proc_mkdir("sgi_uv/gru", NULL);
+
+ for (p = proc_files; p->name; p++)
+ if (create_proc_file(p))
+ goto err;
+ return 0;
+
+err:
+ delete_proc_files();
+ return -1;
+}
+
+void gru_proc_exit(void)
+{
+ delete_proc_files();
+}
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
new file mode 100644
index 00000000..5c3ce245
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -0,0 +1,678 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * GRU DRIVER TABLES, MACROS, externs, etc
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __GRUTABLES_H__
+#define __GRUTABLES_H__
+
+/*
+ * GRU Chiplet:
+ * The GRU is a user-addressable memory accelerator. It provides
+ * several forms of load, store, memset, bcopy instructions. In addition, it
+ * contains special instructions for AMOs, sending messages to message
+ * queues, etc.
+ *
+ * The GRU is an integral part of the node controller. It connects
+ * directly to the cpu socket. In its current implementation, there are 2
+ * GRU chiplets in the node controller on each blade (~node).
+ *
+ * The entire GRU memory space is fully coherent and cacheable by the cpus.
+ *
+ * Each GRU chiplet has a physical memory map that looks like the following:
+ *
+ * +-----------------+
+ * |/////////////////|
+ * |/////////////////|
+ * |/////////////////|
+ * |/////////////////|
+ * |/////////////////|
+ * |/////////////////|
+ * |/////////////////|
+ * |/////////////////|
+ * +-----------------+
+ * | system control |
+ * +-----------------+ _______ +-------------+
+ * |/////////////////| / | |
+ * |/////////////////| / | |
+ * |/////////////////| / | instructions|
+ * |/////////////////| / | |
+ * |/////////////////| / | |
+ * |/////////////////| / |-------------|
+ * |/////////////////| / | |
+ * +-----------------+ | |
+ * | context 15 | | data |
+ * +-----------------+ | |
+ * | ...... | \ | |
+ * +-----------------+ \____________ +-------------+
+ * | context 1 |
+ * +-----------------+
+ * | context 0 |
+ * +-----------------+
+ *
+ * Each of the "contexts" is a chunk of memory that can be mmaped into user
+ * space. The context consists of 2 parts:
+ *
+ * - an instruction space that can be directly accessed by the user
+ * to issue GRU instructions and to check instruction status.
+ *
+ * - a data area that acts as normal RAM.
+ *
+ * User instructions contain virtual addresses of data to be accessed by the
+ * GRU. The GRU contains a TLB that is used to convert these user virtual
+ * addresses to physical addresses.
+ *
+ * The "system control" area of the GRU chiplet is used by the kernel driver
+ * to manage user contexts and to perform functions such as TLB dropin and
+ * purging.
+ *
+ * One context may be reserved for the kernel and used for cross-partition
+ * communication. The GRU will also be used to asynchronously zero out
+ * large blocks of memory (not currently implemented).
+ *
+ *
+ * Tables:
+ *
+ * VDATA-VMA Data - Holds a few parameters. Head of linked list of
+ * GTS tables for threads using the GSEG
+ * GTS - Gru Thread State - contains info for managing a GSEG context. A
+ * GTS is allocated for each thread accessing a
+ * GSEG.
+ * GTD - GRU Thread Data - contains shadow copy of GRU data when GSEG is
+ * not loaded into a GRU
+ * GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs
+ * where a GSEG has been loaded. Similar to
+ * an mm_struct but for GRU.
+ *
+ * GS - GRU State - Used to manage the state of a GRU chiplet
+ * BS - Blade State - Used to manage state of all GRU chiplets
+ * on a blade
+ *
+ *
+ * Normal task tables for task using GRU.
+ * - 2 threads in process
+ * - 2 GSEGs open in process
+ * - GSEG1 is being used by both threads
+ * - GSEG2 is used only by thread 2
+ *
+ * task -->|
+ * task ---+---> mm ->------ (notifier) -------+-> gms
+ * | |
+ * |--> vma -> vdata ---> gts--->| GSEG1 (thread1)
+ * | | |
+ * | +-> gts--->| GSEG1 (thread2)
+ * | |
+ * |--> vma -> vdata ---> gts--->| GSEG2 (thread2)
+ * .
+ * .
+ *
+ * GSEGs are marked DONTCOPY on fork
+ *
+ * At open
+ * file.private_data -> NULL
+ *
+ * At mmap,
+ * vma -> vdata
+ *
+ * After gseg reference
+ * vma -> vdata ->gts
+ *
+ * After fork
+ * parent
+ * vma -> vdata -> gts
+ * child
+ * (vma is not copied)
+ *
+ */
+
+#include <linux/rmap.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/mmu_notifier.h>
+#include "gru.h"
+#include "grulib.h"
+#include "gruhandles.h"
+
+extern struct gru_stats_s gru_stats;
+extern struct gru_blade_state *gru_base[];
+extern unsigned long gru_start_paddr, gru_end_paddr;
+extern void *gru_start_vaddr;
+extern unsigned int gru_max_gids;
+
+#define GRU_MAX_BLADES MAX_NUMNODES
+#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
+
+#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
+#define GRU_DRIVER_VERSION_STR "0.85"
+
+/*
+ * GRU statistics.
+ */
+struct gru_stats_s {
+ atomic_long_t vdata_alloc;
+ atomic_long_t vdata_free;
+ atomic_long_t gts_alloc;
+ atomic_long_t gts_free;
+ atomic_long_t gms_alloc;
+ atomic_long_t gms_free;
+ atomic_long_t gts_double_allocate;
+ atomic_long_t assign_context;
+ atomic_long_t assign_context_failed;
+ atomic_long_t free_context;
+ atomic_long_t load_user_context;
+ atomic_long_t load_kernel_context;
+ atomic_long_t lock_kernel_context;
+ atomic_long_t unlock_kernel_context;
+ atomic_long_t steal_user_context;
+ atomic_long_t steal_kernel_context;
+ atomic_long_t steal_context_failed;
+ atomic_long_t nopfn;
+ atomic_long_t asid_new;
+ atomic_long_t asid_next;
+ atomic_long_t asid_wrap;
+ atomic_long_t asid_reuse;
+ atomic_long_t intr;
+ atomic_long_t intr_cbr;
+ atomic_long_t intr_tfh;
+ atomic_long_t intr_spurious;
+ atomic_long_t intr_mm_lock_failed;
+ atomic_long_t call_os;
+ atomic_long_t call_os_wait_queue;
+ atomic_long_t user_flush_tlb;
+ atomic_long_t user_unload_context;
+ atomic_long_t user_exception;
+ atomic_long_t set_context_option;
+ atomic_long_t check_context_retarget_intr;
+ atomic_long_t check_context_unload;
+ atomic_long_t tlb_dropin;
+ atomic_long_t tlb_preload_page;
+ atomic_long_t tlb_dropin_fail_no_asid;
+ atomic_long_t tlb_dropin_fail_upm;
+ atomic_long_t tlb_dropin_fail_invalid;
+ atomic_long_t tlb_dropin_fail_range_active;
+ atomic_long_t tlb_dropin_fail_idle;
+ atomic_long_t tlb_dropin_fail_fmm;
+ atomic_long_t tlb_dropin_fail_no_exception;
+ atomic_long_t tfh_stale_on_fault;
+ atomic_long_t mmu_invalidate_range;
+ atomic_long_t mmu_invalidate_page;
+ atomic_long_t flush_tlb;
+ atomic_long_t flush_tlb_gru;
+ atomic_long_t flush_tlb_gru_tgh;
+ atomic_long_t flush_tlb_gru_zero_asid;
+
+ atomic_long_t copy_gpa;
+ atomic_long_t read_gpa;
+
+ atomic_long_t mesq_receive;
+ atomic_long_t mesq_receive_none;
+ atomic_long_t mesq_send;
+ atomic_long_t mesq_send_failed;
+ atomic_long_t mesq_noop;
+ atomic_long_t mesq_send_unexpected_error;
+ atomic_long_t mesq_send_lb_overflow;
+ atomic_long_t mesq_send_qlimit_reached;
+ atomic_long_t mesq_send_amo_nacked;
+ atomic_long_t mesq_send_put_nacked;
+ atomic_long_t mesq_page_overflow;
+ atomic_long_t mesq_qf_locked;
+ atomic_long_t mesq_qf_noop_not_full;
+ atomic_long_t mesq_qf_switch_head_failed;
+ atomic_long_t mesq_qf_unexpected_error;
+ atomic_long_t mesq_noop_unexpected_error;
+ atomic_long_t mesq_noop_lb_overflow;
+ atomic_long_t mesq_noop_qlimit_reached;
+ atomic_long_t mesq_noop_amo_nacked;
+ atomic_long_t mesq_noop_put_nacked;
+ atomic_long_t mesq_noop_page_overflow;
+
+};
+
+enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
+ cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+ tghop_invalidate, mcsop_last};
+
+struct mcs_op_statistic {
+ atomic_long_t count;
+ atomic_long_t total;
+ unsigned long max;
+};
+
+extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
+
+#define OPT_DPRINT 1
+#define OPT_STATS 2
+
+
+#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
+
+/* Delay in jiffies between attempts to assign a GRU context */
+#define GRU_ASSIGN_DELAY ((HZ * 20) / 1000)
+
+/*
+ * If a process has its context stolen, min delay in jiffies before trying to
+ * steal a context from another process.
+ */
+#define GRU_STEAL_DELAY ((HZ * 200) / 1000)
+
+#define STAT(id) do { \
+ if (gru_options & OPT_STATS) \
+ atomic_long_inc(&gru_stats.id); \
+ } while (0)
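+
+/*
+ * Example (illustrative): a hot path bumps a counter with
+ *
+ *	STAT(tlb_dropin);
+ *
+ * which increments gru_stats.tlb_dropin only when OPT_STATS is set in
+ * gru_options.
+ */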
+
+#ifdef CONFIG_SGI_GRU_DEBUG
+#define gru_dbg(dev, fmt, x...) \
+ do { \
+ if (gru_options & OPT_DPRINT) \
+ printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
+ } while (0)
+#else
+#define gru_dbg(x...)
+#endif
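+
+/*
+ * Example (illustrative): with CONFIG_SGI_GRU_DEBUG enabled and OPT_DPRINT
+ * set in gru_options, a trace line is emitted by, e.g.:
+ *
+ *	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
+ */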
+
+/*-----------------------------------------------------------------------------
+ * ASID management
+ */
+#define MAX_ASID 0xfffff0
+#define MIN_ASID 8
+#define ASID_INC 8 /* number of regions */
+
+/* Generate a GRU asid value from a GRU base asid & a virtual address. */
+#define VADDR_HI_BIT 64
+#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
+#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
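+
+/*
+ * Worked example (illustrative): with VADDR_HI_BIT == 64, GRUREGION()
+ * extracts bits 62:61 of the address. For addr == 0x2000000000000000
+ * (only bit 61 set), GRUREGION(addr) == 1 and GRUASID(8, addr) == 9.
+ */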
+
+/*------------------------------------------------------------------------------
+ * File & VMS Tables
+ */
+
+struct gru_state;
+
+/*
+ * This structure is pointed to from the mmstruct via the notifier pointer.
+ * There is one of these per address space.
+ */
+struct gru_mm_tracker { /* pack to reduce size */
+ unsigned int mt_asid_gen:24; /* ASID wrap count */
+ unsigned int mt_asid:24; /* current base ASID for gru */
+ unsigned short mt_ctxbitmap:16;/* bitmap of contexts using
+ asid */
+} __attribute__ ((packed));
+
+struct gru_mm_struct {
+ struct mmu_notifier ms_notifier;
+ atomic_t ms_refcnt;
+ spinlock_t ms_asid_lock; /* protects ASID assignment */
+ atomic_t ms_range_active;/* num range_invals active */
+ char ms_released;
+ wait_queue_head_t ms_wait_queue;
+ DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
+ struct gru_mm_tracker ms_asids[GRU_MAX_GRUS];
+};
+
+/*
+ * One of these structures is allocated when a GSEG is mmaped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ */
+struct gru_vma_data {
+ spinlock_t vd_lock; /* Serialize access to vma */
+ struct list_head vd_head; /* head of linked list of gts */
+ long vd_user_options;/* misc user option flags */
+ int vd_cbr_au_count;
+ int vd_dsr_au_count;
+ unsigned char vd_tlb_preload_count;
+};
+
+/*
+ * One of these is allocated for each thread accessing a mmaped GRU. A linked
+ * list of these structures is hung off the struct gru_vma_data in the mm_struct.
+ */
+struct gru_thread_state {
+ struct list_head ts_next; /* list - head at vma-private */
+ struct mutex ts_ctxlock; /* load/unload CTX lock */
+ struct mm_struct *ts_mm; /* mm currently mapped to
+ context */
+ struct vm_area_struct *ts_vma; /* vma of GRU context */
+ struct gru_state *ts_gru; /* GRU where the context is
+ loaded */
+ struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
+ unsigned char ts_tlb_preload_count; /* TLB preload pages */
+ unsigned long ts_cbr_map; /* map of allocated CBRs */
+ unsigned long ts_dsr_map; /* map of allocated DATA
+ resources */
+ unsigned long ts_steal_jiffies;/* jiffies when context last
+ stolen */
+ long ts_user_options;/* misc user option flags */
+ pid_t ts_tgid_owner; /* task that is using the
+ context - for migration */
+ short ts_user_blade_id;/* user selected blade */
+ char ts_user_chiplet_id;/* user selected chiplet */
+ unsigned short ts_sizeavail; /* Pagesizes in use */
+ int ts_tsid; /* thread that owns the
+ structure */
+ int ts_tlb_int_select;/* target cpu if interrupts
+ enabled */
+ int ts_ctxnum; /* context number where the
+ context is loaded */
+ atomic_t ts_refcnt; /* reference count GTS */
+ unsigned char ts_dsr_au_count;/* Number of DSR resources
+ required for context */
+ unsigned char ts_cbr_au_count;/* Number of CBR resources
+ required for context */
+ char ts_cch_req_slice;/* CCH packet slice */
+ char ts_blade; /* If >= 0, migrate context if
+ ref from different blade */
+ char ts_force_cch_reload;
+ char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
+ allocated CB */
+ int ts_data_valid; /* Indicates if ts_gdata has
+ valid data */
+ struct gru_gseg_statistics ustats; /* User statistics */
+ unsigned long ts_gdata[0]; /* save area for GRU data (CB,
+ DS, CBE) */
+};
+
+/*
+ * Threaded programs actually allocate an array of GSEGs when a context is
+ * created. Each thread uses a separate GSEG. TSID is the index into the GSEG
+ * array.
+ */
+#define TSID(a, v) (((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
+#define UGRUADDR(gts) ((gts)->ts_vma->vm_start + \
+ (gts)->ts_tsid * GRU_GSEG_PAGESIZE)
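+
+/*
+ * Worked example (illustrative): in a vma mapping several GSEGs, the
+ * thread touching vm_start + 2 * GRU_GSEG_PAGESIZE gets TSID 2, and
+ * UGRUADDR() maps that TSID back to the same user address.
+ */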
+
+#define NULLCTX (-1) /* if context not loaded into GRU */
+
+/*-----------------------------------------------------------------------------
+ * GRU State Tables
+ */
+
+/*
+ * One of these exists for each GRU chiplet.
+ */
+struct gru_state {
+ struct gru_blade_state *gs_blade; /* GRU state for entire
+ blade */
+ unsigned long gs_gru_base_paddr; /* Physical address of
+ gru segments (64) */
+ void *gs_gru_base_vaddr; /* Virtual address of
+ gru segments (64) */
+ unsigned short gs_gid; /* unique GRU number */
+ unsigned short gs_blade_id; /* blade of GRU */
+ unsigned char gs_chiplet_id; /* blade chiplet of GRU */
+ unsigned char gs_tgh_local_shift; /* used to pick TGH for
+ local flush */
+ unsigned char gs_tgh_first_remote; /* starting TGH# for
+ remote flush */
+ spinlock_t gs_asid_lock; /* lock used for
+ assigning asids */
+ spinlock_t gs_lock; /* lock used for
+ assigning contexts */
+
+ /* -- the following are protected by the gs_asid_lock spinlock ---- */
+ unsigned int gs_asid; /* Next available ASID */
+ unsigned int gs_asid_limit; /* Limit of available
+ ASIDs */
+ unsigned int gs_asid_gen; /* asid generation.
+ Inc on wrap */
+
+ /* --- the following fields are protected by the gs_lock spinlock --- */
+ unsigned long gs_context_map; /* bitmap to manage
+ contexts in use */
+ unsigned long gs_cbr_map; /* bitmap to manage CB
+ resources */
+ unsigned long gs_dsr_map; /* bitmap used to manage
+ DATA resources */
+ unsigned int gs_reserved_cbrs; /* Number of kernel-
+ reserved cbrs */
+ unsigned int gs_reserved_dsr_bytes; /* Bytes of kernel-
+ reserved dsrs */
+ unsigned short gs_active_contexts; /* number of contexts
+ in use */
+ struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
+ the context */
+ int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
+};
+
+/*
+ * This structure contains the GRU state for all the GRUs on a blade.
+ */
+struct gru_blade_state {
+ void *kernel_cb; /* First kernel
+ reserved cb */
+ void *kernel_dsr; /* First kernel
+ reserved DSR */
+ struct rw_semaphore bs_kgts_sema; /* lock for kgts */
+ struct gru_thread_state *bs_kgts; /* GTS for kernel use */
+
+ /* ---- the following are used for managing kernel async GRU CBRs --- */
+ int bs_async_dsr_bytes; /* DSRs for async */
+ int bs_async_cbrs; /* CBRs AU for async */
+ struct completion *bs_async_wq;
+
+ /* ---- the following are protected by the bs_lock spinlock ---- */
+ spinlock_t bs_lock; /* lock used for
+ stealing contexts */
+ int bs_lru_ctxnum; /* STEAL - last context
+ stolen */
+ struct gru_state *bs_lru_gru; /* STEAL - last gru
+ stolen */
+
+ struct gru_state bs_grus[GRU_CHIPLETS_PER_BLADE];
+};
+
+/*-----------------------------------------------------------------------------
+ * Address Primitives
+ */
+#define get_tfm_for_cpu(g, c) \
+ ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c)))
+#define get_tfh_by_index(g, i) \
+ ((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i)))
+#define get_tgh_by_index(g, i) \
+ ((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i)))
+#define get_cbe_by_index(g, i) \
+ ((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\
+ (i)))
+
+/*-----------------------------------------------------------------------------
+ * Useful Macros
+ */
+
+/* Given a blade# & chiplet#, get a pointer to the GRU */
+#define get_gru(b, c) (&gru_base[b]->bs_grus[c])
+
+/* Number of bytes to save/restore when unloading/loading GRU contexts */
+#define DSR_BYTES(dsr) ((dsr) * GRU_DSR_AU_BYTES)
+#define CBR_BYTES(cbr) ((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2)
+
+/* Convert a user CB number to the actual CBRNUM */
+#define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \
+ * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE)
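+
+/*
+ * Worked example (illustrative, assuming GRU_CBR_AU_SIZE is 2 as defined
+ * in gruhandles.h): with ts_cbr_idx[] = {5, 9, ...}, user CB #3 is in
+ * allocation unit 1 at offset 1, so thread_cbr_number(gts, 3) == 9*2+1 == 19.
+ */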
+
+/* Convert a gid to a pointer to the GRU */
+#define GID_TO_GRU(gid) \
+ (gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ? \
+ (&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]-> \
+ bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) : \
+ NULL)
+
+/* Scan all active GRUs in a GRU bitmap */
+#define for_each_gru_in_bitmap(gid, map) \
+ for_each_set_bit((gid), (map), GRU_MAX_GRUS)
+
+/* Scan all active GRUs on a specific blade */
+#define for_each_gru_on_blade(gru, nid, i) \
+ for ((gru) = gru_base[nid]->bs_grus, (i) = 0; \
+ (i) < GRU_CHIPLETS_PER_BLADE; \
+ (i)++, (gru)++)
+
+/* Scan all GRUs */
+#define foreach_gid(gid) \
+ for ((gid) = 0; (gid) < gru_max_gids; (gid)++)
+
+/* Scan all active GTSs on a gru. Note: must hold gs_lock to use this macro. */
+#define for_each_gts_on_gru(gts, gru, ctxnum) \
+ for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \
+ if (((gts) = (gru)->gs_gts[ctxnum]))
+
+/* Scan each CBR whose bit is set in a TFM (or copy of) */
+#define for_each_cbr_in_tfm(i, map) \
+ for_each_set_bit((i), (map), GRU_NUM_CBE)
+
+/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */
+#define for_each_cbr_in_allocation_map(i, map, k) \
+ for_each_set_bit((k), (map), GRU_CBR_AU) \
+ for ((i) = (k)*GRU_CBR_AU_SIZE; \
+ (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)
+
+/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
+#define for_each_dsr_in_allocation_map(i, map, k) \
+ for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \
+ for ((i) = (k) * GRU_DSR_AU_CL; \
+ (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
+
+#define gseg_physical_address(gru, ctxnum) \
+ ((gru)->gs_gru_base_paddr + (ctxnum) * GRU_GSEG_STRIDE)
+#define gseg_virtual_address(gru, ctxnum) \
+ ((gru)->gs_gru_base_vaddr + (ctxnum) * GRU_GSEG_STRIDE)
+
+/*-----------------------------------------------------------------------------
+ * Lock / Unlock GRU handles
+ * Use the "delresp" bit in the handle as a "lock" bit.
+ */
+
+/* Lock hierarchy checking enabled only in emulator */
+
+/* 0 = lock failed, 1 = locked */
+static inline int __trylock_handle(void *h)
+{
+ return !test_and_set_bit(1, h);
+}
+
+static inline void __lock_handle(void *h)
+{
+ while (test_and_set_bit(1, h))
+ cpu_relax();
+}
+
+static inline void __unlock_handle(void *h)
+{
+ clear_bit(1, h);
+}
+
+static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch)
+{
+ return __trylock_handle(cch);
+}
+
+static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
+{
+ __lock_handle(cch);
+}
+
+static inline void unlock_cch_handle(struct gru_context_configuration_handle
+ *cch)
+{
+ __unlock_handle(cch);
+}
+
+static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+ __lock_handle(tgh);
+}
+
+static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+ __unlock_handle(tgh);
+}
+
+static inline int is_kernel_context(struct gru_thread_state *gts)
+{
+ return !gts->ts_mm;
+}
+
+/*
+ * The following are for Nehalem-EX. A more general scheme is needed for
+ * future processors.
+ */
+#define UV_MAX_INT_CORES 8
+#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
+#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
+#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
+ ((cpu_physical_id(p) >> 1) & 3))
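+
+/*
+ * Worked example (illustrative): for cpu_physical_id(p) == 0x16 (0b10110),
+ * uv_cpu_socket_number(p) == 0, uv_cpu_ht_number(p) == 0 and
+ * uv_cpu_core_number(p) == (5 & 4) | (11 & 3) == 7.
+ */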
+/*-----------------------------------------------------------------------------
+ * Function prototypes & externs
+ */
+struct gru_unload_context_req;
+
+extern const struct vm_operations_struct gru_vm_ops;
+extern struct device *grudev;
+
+extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
+ int tsid);
+extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
+ *vma, int tsid);
+extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
+ *vma, int tsid);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
+extern void gru_load_context(struct gru_thread_state *gts);
+extern void gru_steal_context(struct gru_thread_state *gts);
+extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
+extern int gru_update_cch(struct gru_thread_state *gts);
+extern void gts_drop(struct gru_thread_state *gts);
+extern void gru_tgh_flush_init(struct gru_state *gru);
+extern int gru_kservices_init(void);
+extern void gru_kservices_exit(void);
+extern irqreturn_t gru0_intr(int irq, void *dev_id);
+extern irqreturn_t gru1_intr(int irq, void *dev_id);
+extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
+extern int gru_dump_chiplet_request(unsigned long arg);
+extern long gru_get_gseg_statistics(unsigned long arg);
+extern int gru_handle_user_call_os(unsigned long address);
+extern int gru_user_flush_tlb(unsigned long arg);
+extern int gru_user_unload_context(unsigned long arg);
+extern int gru_get_exception_detail(unsigned long arg);
+extern int gru_set_context_option(unsigned long address);
+extern void gru_check_context_placement(struct gru_thread_state *gts);
+extern int gru_cpu_fault_map_id(void);
+extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
+extern void gru_flush_all_tlb(struct gru_state *gru);
+extern int gru_proc_init(void);
+extern void gru_proc_exit(void);
+
+extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid);
+extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
+ int cbr_au_count, char *cbmap);
+extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
+ int dsr_au_count, char *dsmap);
+extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
+extern struct gru_mm_struct *gru_register_mmu_notifier(void);
+extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
+
+extern int gru_ktest(unsigned long arg);
+extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
+ unsigned long len);
+
+extern unsigned long gru_options;
+
+#endif /* __GRUTABLES_H__ */
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
new file mode 100644
index 00000000..240a6d36
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -0,0 +1,378 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * MMUOPS callbacks + TLB flushing
+ *
+ * This file handles mmu notifier callbacks from the core kernel. The callbacks
+ * are used to update the TLB in the GRU as a result of changes in the
+ * state of a process address space. This file also handles TLB invalidates
+ * from the GRU driver.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/hugetlb.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/srcu.h>
+#include <asm/processor.h>
+#include "gru.h"
+#include "grutables.h"
+#include <asm/uv/uv_hub.h>
+
+#define gru_random() get_cycles()
+
+/* ---------------------------------- TLB Invalidation functions --------
+ * get_tgh_handle
+ *
+ * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
+ * local blade, use a fixed TGH that is a function of the blade-local cpu
+ * number. Normally, this TGH is private to the cpu & no contention occurs for
+ * the TGH. For offblade GRUs, select a random TGH in the range above the
+ * private TGHs. A spinlock is required to access this TGH & the lock must be
+ * released when the invalidate completes. This sucks, but it is the best we
+ * can do.
+ *
+ * Note that the spinlock is IN the TGH handle so locking does not involve
+ * additional cache lines.
+ *
+ */
+static inline int get_off_blade_tgh(struct gru_state *gru)
+{
+ int n;
+
+ n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
+ n = gru_random() % n;
+ n += gru->gs_tgh_first_remote;
+ return n;
+}
+
+static inline int get_on_blade_tgh(struct gru_state *gru)
+{
+ return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
+}
+
+static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
+ *gru)
+{
+ struct gru_tlb_global_handle *tgh;
+ int n;
+
+ preempt_disable();
+ if (uv_numa_blade_id() == gru->gs_blade_id)
+ n = get_on_blade_tgh(gru);
+ else
+ n = get_off_blade_tgh(gru);
+ tgh = get_tgh_by_index(gru, n);
+ lock_tgh_handle(tgh);
+
+ return tgh;
+}
+
+static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+ unlock_tgh_handle(tgh);
+ preempt_enable();
+}
+
+/*
+ * gru_flush_tlb_range
+ *
+ * General purpose TLB invalidation function. This function scans every GRU in
+ * the ENTIRE system (partition) looking for GRUs where the specified MM has
+ * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
+ * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
+ * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
+ * cost of (possibly) a large number of future TLBmisses.
+ *
+ * The current algorithm is optimized based on the following (somewhat true)
+ * assumptions:
+ * - GRU contexts are not loaded into a GRU unless a reference is made to
+ * the data segment or control block (this is true, not an assumption).
+ * If a DS/CB is referenced, the user will also issue instructions that
+ * cause TLBmisses. It is not necessary to optimize for the case where
+ * contexts are loaded but no instructions cause TLB misses. (I know
+ * this will happen but I'm not optimizing for it).
+ * - GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
+ * a few usec but in unusual cases, it could be longer. Avoid if
+ * possible.
+ * - intrablade process migration between cpus is not frequent but is
+ * common.
+ * - a GRU context is not typically migrated to a different GRU on the
+ * blade because of intrablade migration
+ * - interblade migration is rare. Processes migrate their GRU context to
+ * the new blade.
+ * - if interblade migration occurs, migration back to the original blade
+ * is very very rare (ie., no optimization for this case)
+ * - most GRU instructions operate on a subset of the user REGIONS. Code
+ * & shared library regions are not likely targets of GRU instructions.
+ *
+ * To help improve the efficiency of TLB invalidation, the GMS data
+ * structure is maintained for EACH address space (MM struct). The GMS is
+ * also the structure that contains the pointer to the mmu callout
+ * functions. This structure is linked to the mm_struct for the address space
+ * using the mmu "register" function. The mmu interfaces are used to
+ * provide the callbacks for TLB invalidation. The GMS contains:
+ *
+ * - asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
+ * loaded into the GRU.
+ * - asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
+ * the above array
+ * - ctxbitmap[maxgrus]. Indicates the contexts that are currently active
+ * in the GRU for the address space. This bitmap must be passed to the
+ * GRU to do an invalidate.
+ *
+ * The current algorithm for invalidating TLBs is:
+ * - scan the asidmap for GRUs where the context has been loaded, ie,
+ * asid is non-zero.
+ * - for each gru found:
+ * - if the ctxtmap is non-zero, there are active contexts in the
+ * GRU. TLB invalidate instructions must be issued to the GRU.
+ * - if the ctxtmap is zero, no context is active. Set the ASID to
+ * zero to force a full TLB invalidation. This is fast but will
+ * cause a lot of TLB misses if the context is reloaded onto the
+ * GRU
+ *
+ */
+
+void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
+ unsigned long len)
+{
+ struct gru_state *gru;
+ struct gru_mm_tracker *asids;
+ struct gru_tlb_global_handle *tgh;
+ unsigned long num;
+ int grupagesize, pagesize, pageshift, gid, asid;
+
+ /* ZZZ TODO - handle huge pages */
+ pageshift = PAGE_SHIFT;
+ pagesize = (1UL << pageshift);
+ grupagesize = GRU_PAGESIZE(pageshift);
+ num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);
+
+ STAT(flush_tlb);
+ gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
+ start, len, gms->ms_asidmap[0]);
+
+ spin_lock(&gms->ms_asid_lock);
+ for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
+ STAT(flush_tlb_gru);
+ gru = GID_TO_GRU(gid);
+ asids = gms->ms_asids + gid;
+ asid = asids->mt_asid;
+ if (asids->mt_ctxbitmap && asid) {
+ STAT(flush_tlb_gru_tgh);
+ asid = GRUASID(asid, start);
+ gru_dbg(grudev,
+ " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
+ gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
+ tgh = get_lock_tgh_handle(gru);
+ tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
+ num - 1, asids->mt_ctxbitmap);
+ get_unlock_tgh_handle(tgh);
+ } else {
+ STAT(flush_tlb_gru_zero_asid);
+ asids->mt_asid = 0;
+ __clear_bit(gru->gs_gid, gms->ms_asidmap);
+ gru_dbg(grudev,
+ " CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
+ gid, asid, asids->mt_ctxbitmap,
+ gms->ms_asidmap[0]);
+ }
+ }
+ spin_unlock(&gms->ms_asid_lock);
+}
+
+/*
+ * Flush the entire TLB on a chiplet.
+ */
+void gru_flush_all_tlb(struct gru_state *gru)
+{
+ struct gru_tlb_global_handle *tgh;
+
+ gru_dbg(grudev, "gid %d\n", gru->gs_gid);
+ tgh = get_lock_tgh_handle(gru);
+ tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
+ get_unlock_tgh_handle(tgh);
+}
+
+/*
+ * MMUOPS notifier callout functions
+ */
+static void gru_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
+ ms_notifier);
+
+ STAT(mmu_invalidate_range);
+ atomic_inc(&gms->ms_range_active);
+ gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
+ start, end, atomic_read(&gms->ms_range_active));
+ gru_flush_tlb_range(gms, start, end - start);
+}
+
+static void gru_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
+ ms_notifier);
+
+ /* ..._and_test() provides needed barrier */
+ (void)atomic_dec_and_test(&gms->ms_range_active);
+
+ wake_up_all(&gms->ms_wait_queue);
+ gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
+}
+
+static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long address)
+{
+ struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
+ ms_notifier);
+
+ STAT(mmu_invalidate_page);
+ gru_flush_tlb_range(gms, address, PAGE_SIZE);
+ gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
+}
+
+static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
+ ms_notifier);
+
+ gms->ms_released = 1;
+ gru_dbg(grudev, "gms %p\n", gms);
+}
+
+
+static const struct mmu_notifier_ops gru_mmuops = {
+ .invalidate_page = gru_invalidate_page,
+ .invalidate_range_start = gru_invalidate_range_start,
+ .invalidate_range_end = gru_invalidate_range_end,
+ .release = gru_release,
+};
+
+/* Move this to the basic mmu_notifier file. But for now... */
+static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
+ const struct mmu_notifier_ops *ops)
+{
+ struct mmu_notifier *mn, *gru_mn = NULL;
+ struct hlist_node *n;
+
+ if (mm->mmu_notifier_mm) {
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+ hlist)
+ if (mn->ops == ops) {
+ gru_mn = mn;
+ break;
+ }
+ rcu_read_unlock();
+ }
+ return gru_mn;
+}
+
+struct gru_mm_struct *gru_register_mmu_notifier(void)
+{
+ struct gru_mm_struct *gms;
+ struct mmu_notifier *mn;
+ int err;
+
+ mn = mmu_find_ops(current->mm, &gru_mmuops);
+ if (mn) {
+ gms = container_of(mn, struct gru_mm_struct, ms_notifier);
+ atomic_inc(&gms->ms_refcnt);
+ } else {
+ gms = kzalloc(sizeof(*gms), GFP_KERNEL);
+ if (gms) {
+ STAT(gms_alloc);
+ spin_lock_init(&gms->ms_asid_lock);
+ gms->ms_notifier.ops = &gru_mmuops;
+ atomic_set(&gms->ms_refcnt, 1);
+ init_waitqueue_head(&gms->ms_wait_queue);
+ err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ if (err)
+ goto error;
+ }
+ }
+ gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
+ atomic_read(&gms->ms_refcnt));
+ return gms;
+error:
+ kfree(gms);
+ return ERR_PTR(err);
+}
+
+void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
+{
+ gru_dbg(grudev, "gms %p, refcnt %d, released %d\n", gms,
+ atomic_read(&gms->ms_refcnt), gms->ms_released);
+ if (atomic_dec_return(&gms->ms_refcnt) == 0) {
+ if (!gms->ms_released)
+ mmu_notifier_unregister(&gms->ms_notifier, current->mm);
+ kfree(gms);
+ STAT(gms_free);
+ }
+}
+
+/*
+ * Setup TGH parameters. There are:
+ * - 24 TGH handles per GRU chiplet
+ * - a portion (MAX_LOCAL_TGH) of the handles are reserved for
+ * use by blade-local cpus
+ * - the rest are used by off-blade cpus. This usage is
+ * less frequent than blade-local usage.
+ *
+ * For now, use 16 handles for local flushes, 8 for remote flushes. If the blade
+ * has 16 or fewer cpus, each cpu has a unique handle that it can
+ * use.
+ */
+#define MAX_LOCAL_TGH 16
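+
+/*
+ * Worked example (illustrative): a blade with 32 possible cpus gives
+ * n = 32, so gs_tgh_local_shift = fls(31) - fls(15) = 5 - 4 = 1 (two cpus
+ * share each local TGH) and gs_tgh_first_remote = (32 + 1) >> 1 = 16,
+ * leaving TGHs 16..23 of the 24 per chiplet for off-blade flushes.
+ */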
+
+void gru_tgh_flush_init(struct gru_state *gru)
+{
+ int cpus, shift = 0, n;
+
+ cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);
+
+ /* n = cpus rounded up to next power of 2 */
+ if (cpus) {
+ n = 1 << fls(cpus - 1);
+
+ /*
+ * shift count for converting local cpu# to TGH index
+ * 0 if cpus <= MAX_LOCAL_TGH,
+ * 1 if cpus <= 2*MAX_LOCAL_TGH,
+ * etc
+ */
+ shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
+ }
+ gru->gs_tgh_local_shift = shift;
+
+ /* first starting TGH index to use for remote purges */
+ gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;
+
+}
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
new file mode 100644
index 00000000..4fc40d8e
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for SGI's XP devices.
+#
+
+obj-$(CONFIG_SGI_XP) += xp.o
+xp-y := xp_main.o
+xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o
+xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o
+xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o
+xp-$(CONFIG_X86_64) += xp_uv.o
+
+obj-$(CONFIG_SGI_XP) += xpc.o
+xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
+xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o
+xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o
+xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o
+xpc-$(CONFIG_X86_64) += xpc_uv.o
+
+obj-$(CONFIG_SGI_XP) += xpnet.o
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
new file mode 100644
index 00000000..c862cd45
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp.h
@@ -0,0 +1,358 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * External Cross Partition (XP) structures and defines.
+ */
+
+#ifndef _DRIVERS_MISC_SGIXP_XP_H
+#define _DRIVERS_MISC_SGIXP_XP_H
+
+#include <linux/mutex.h>
+
+#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV
+#include <asm/uv/uv.h>
+#define is_uv() is_uv_system()
+#endif
+
+#ifndef is_uv
+#define is_uv() 0
+#endif
+
+#if defined CONFIG_IA64
+#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
+#define is_shub() ia64_platform_is("sn2")
+#endif
+
+#ifndef is_shub1
+#define is_shub1() 0
+#endif
+
+#ifndef is_shub2
+#define is_shub2() 0
+#endif
+
+#ifndef is_shub
+#define is_shub() 0
+#endif
+
+#ifdef USE_DBUG_ON
+#define DBUG_ON(condition) BUG_ON(condition)
+#else
+#define DBUG_ON(condition)
+#endif
+
+/*
+ * Define the maximum number of partitions the system can possibly support.
+ * It is based on the maximum number of hardware partitionable regions. The
+ * term 'region' in this context refers to the minimum number of nodes that
+ * can comprise an access protection grouping. The access protection is in
+ * regard to memory, IPI and IOI.
+ *
+ * The maximum number of hardware partitionable regions is equal to the
+ * maximum number of nodes in the entire system divided by the minimum number
+ * of nodes that comprise an access protection grouping.
+ */
+#define XP_MAX_NPARTITIONS_SN2 64
+#define XP_MAX_NPARTITIONS_UV 256
+
+/*
+ * XPC establishes channel connections between the local partition and any
+ * other partition that is currently up. Over these channels, kernel-level
+ * `users' can communicate with their counterparts on the other partitions.
+ *
+ * If the need for additional channels arises, one can simply increase
+ * XPC_MAX_NCHANNELS accordingly. If the day should come where that number
+ * exceeds the absolute MAXIMUM number of channels possible (eight), then one
+ * will need to make changes to the XPC code to accommodate this.
+ *
+ * The absolute maximum number of channels possible is limited to eight for
+ * performance reasons on sn2 hardware. The internal cross partition structures
+ * require sixteen bytes per channel, and eight allows all of this
+ * interface-shared info to fit in one 128-byte cacheline.
+ */
+#define XPC_MEM_CHANNEL 0 /* memory channel number */
+#define XPC_NET_CHANNEL 1 /* network channel number */
+
+#define XPC_MAX_NCHANNELS 2 /* max #of channels allowed */
+
+#if XPC_MAX_NCHANNELS > 8
+#error XPC_MAX_NCHANNELS exceeds absolute MAXIMUM possible.
+#endif
+
+/*
+ * Define macro, XPC_MSG_SIZE(), is provided for the user
+ * that wants to fit as many msg entries as possible in a given memory size
+ * (e.g. a memory page).
+ */
+#define XPC_MSG_MAX_SIZE 128
+#define XPC_MSG_HDR_MAX_SIZE 16
+#define XPC_MSG_PAYLOAD_MAX_SIZE (XPC_MSG_MAX_SIZE - XPC_MSG_HDR_MAX_SIZE)
+
+#define XPC_MSG_SIZE(_payload_size) \
+ ALIGN(XPC_MSG_HDR_MAX_SIZE + (_payload_size), \
+ is_uv() ? 64 : 128)
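+
+/*
+ * Worked example (illustrative): for a 48-byte payload,
+ * XPC_MSG_SIZE(48) = ALIGN(16 + 48, 64 or 128), i.e. 64 on UV but 128 on
+ * sn2, so a 4096-byte page holds 64 message entries on UV versus 32 on sn2.
+ */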
+
+
+/*
+ * Define the return values and values passed to user's callout functions.
+ * (It is important to add new value codes at the end just preceding
+ * xpUnknownReason, which must have the highest numerical value.)
+ */
+enum xp_retval {
+ xpSuccess = 0,
+
+ xpNotConnected, /* 1: channel is not connected */
+ xpConnected, /* 2: channel connected (opened) */
+ xpRETIRED1, /* 3: (formerly xpDisconnected) */
+
+ xpMsgReceived, /* 4: message received */
+ xpMsgDelivered, /* 5: message delivered and acknowledged */
+
+ xpRETIRED2, /* 6: (formerly xpTransferFailed) */
+
+ xpNoWait, /* 7: operation would require wait */
+ xpRetry, /* 8: retry operation */
+ xpTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
+ xpInterrupted, /* 10: interrupted wait */
+
+ xpUnequalMsgSizes, /* 11: message size disparity between sides */
+ xpInvalidAddress, /* 12: invalid address */
+
+ xpNoMemory, /* 13: no memory available for XPC structures */
+ xpLackOfResources, /* 14: insufficient resources for operation */
+ xpUnregistered, /* 15: channel is not registered */
+ xpAlreadyRegistered, /* 16: channel is already registered */
+
+ xpPartitionDown, /* 17: remote partition is down */
+ xpNotLoaded, /* 18: XPC module is not loaded */
+ xpUnloading, /* 19: this side is unloading XPC module */
+
+ xpBadMagic, /* 20: XPC MAGIC string not found */
+
+ xpReactivating, /* 21: remote partition was reactivated */
+
+ xpUnregistering, /* 22: this side is unregistering channel */
+ xpOtherUnregistering, /* 23: other side is unregistering channel */
+
+ xpCloneKThread, /* 24: cloning kernel thread */
+ xpCloneKThreadFailed, /* 25: cloning kernel thread failed */
+
+ xpNoHeartbeat, /* 26: remote partition has no heartbeat */
+
+ xpPioReadError, /* 27: PIO read error */
+ xpPhysAddrRegFailed, /* 28: registration of phys addr range failed */
+
+ xpRETIRED3, /* 29: (formerly xpBteDirectoryError) */
+ xpRETIRED4, /* 30: (formerly xpBtePoisonError) */
+ xpRETIRED5, /* 31: (formerly xpBteWriteError) */
+ xpRETIRED6, /* 32: (formerly xpBteAccessError) */
+ xpRETIRED7, /* 33: (formerly xpBtePWriteError) */
+ xpRETIRED8, /* 34: (formerly xpBtePReadError) */
+ xpRETIRED9, /* 35: (formerly xpBteTimeOutError) */
+ xpRETIRED10, /* 36: (formerly xpBteXtalkError) */
+ xpRETIRED11, /* 37: (formerly xpBteNotAvailable) */
+ xpRETIRED12, /* 38: (formerly xpBteUnmappedError) */
+
+ xpBadVersion, /* 39: bad version number */
+ xpVarsNotSet, /* 40: the XPC variables are not set up */
+ xpNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
+ xpInvalidPartid, /* 42: invalid partition ID */
+ xpLocalPartid, /* 43: local partition ID */
+
+ xpOtherGoingDown, /* 44: other side going down, reason unknown */
+ xpSystemGoingDown, /* 45: system is going down, reason unknown */
+ xpSystemHalt, /* 46: system is being halted */
+ xpSystemReboot, /* 47: system is being rebooted */
+ xpSystemPoweroff, /* 48: system is being powered off */
+
+ xpDisconnecting, /* 49: channel disconnecting (closing) */
+
+ xpOpenCloseError, /* 50: channel open/close protocol error */
+
+ xpDisconnected, /* 51: channel disconnected (closed) */
+
+ xpBteCopyError, /* 52: bte_copy() returned error */
+ xpSalError, /* 53: sn SAL error */
+ xpRsvdPageNotSet, /* 54: the reserved page is not set up */
+ xpPayloadTooBig, /* 55: payload too large for message slot */
+
+ xpUnsupported, /* 56: unsupported functionality or resource */
+ xpNeedMoreInfo, /* 57: more info is needed by SAL */
+
+ xpGruCopyError, /* 58: gru_copy_gru() returned error */
+ xpGruSendMqError, /* 59: gru send message queue related error */
+
+ xpBadChannelNumber, /* 60: invalid channel number */
+ xpBadMsgType, /* 61: invalid message type */
+ xpBiosError, /* 62: BIOS error */
+
+ xpUnknownReason /* 63: unknown reason - must be last in enum */
+};
+
+/*
+ * Define the callout function type used by XPC to update the user on
+ * connection activity and state changes via the user function registered
+ * by xpc_connect().
+ *
+ * Arguments:
+ *
+ * reason - reason code.
+ * partid - partition ID associated with condition.
+ * ch_number - channel # associated with condition.
+ * data - pointer to optional data.
+ * key - pointer to optional user-defined value provided as the "key"
+ * argument to xpc_connect().
+ *
+ * A reason code of xpConnected indicates that a connection has been
+ * established to the specified partition on the specified channel. The data
+ * argument indicates the max number of entries allowed in the message queue.
+ *
+ * A reason code of xpMsgReceived indicates that a XPC message arrived from
+ * the specified partition on the specified channel. The data argument
+ * specifies the address of the message's payload. The user must call
+ * xpc_received() when finished with the payload.
+ *
+ * All other reason codes indicate failure. The data argument is NULL.
+ * When a failure reason code is received, one can assume that the channel
+ * is not connected.
+ */
+typedef void (*xpc_channel_func) (enum xp_retval reason, short partid,
+ int ch_number, void *data, void *key);
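+
+/*
+ * A minimal sketch of such a callout (illustrative only; handle_payload()
+ * is a hypothetical helper):
+ *
+ *	static void my_channel_func(enum xp_retval reason, short partid,
+ *				    int ch_number, void *data, void *key)
+ *	{
+ *		if (reason == xpMsgReceived) {
+ *			handle_payload(data);
+ *			xpc_received(partid, ch_number, data);
+ *		}
+ *	}
+ */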
+
+/*
+ * Define the callout function type used by XPC to notify the user of
+ * messages received and delivered via the user function registered by
+ * xpc_send_notify().
+ *
+ * Arguments:
+ *
+ * reason - reason code.
+ * partid - partition ID associated with condition.
+ * ch_number - channel # associated with condition.
+ * key - pointer to optional user-defined value provided as the "key"
+ * argument to xpc_send_notify().
+ *
+ * A reason code of xpMsgDelivered indicates that the message was delivered
+ * to the intended recipient and that they have acknowledged its receipt by
+ * calling xpc_received().
+ *
+ * All other reason codes indicate failure.
+ *
+ * NOTE: The user defined function must be callable by an interrupt handler
+ * and thus cannot block.
+ */
+typedef void (*xpc_notify_func) (enum xp_retval reason, short partid,
+ int ch_number, void *key);
+
+/*
+ * The following is a registration entry. There is a global array of these,
+ * one per channel. It is used to record the connection registration made
+ * by the users of XPC. As long as a registration entry exists, for any
+ * partition that comes up, XPC will attempt to establish a connection on
+ * that channel. Notification that a connection has been made will occur via
+ * the xpc_channel_func function.
+ *
+ * The 'func' field points to the function to call when asynchronous
+ * notification is required for such events as: a connection established/lost,
+ * or an incoming message received, or an error condition encountered. A
+ * non-NULL 'func' field indicates that there is an active registration for
+ * the channel.
+ */
+struct xpc_registration {
+ struct mutex mutex;
+ xpc_channel_func func; /* function to call */
+ void *key; /* pointer to user's key */
+ u16 nentries; /* #of msg entries in local msg queue */
+ u16 entry_size; /* message queue's message entry size */
+ u32 assigned_limit; /* limit on #of assigned kthreads */
+ u32 idle_limit; /* limit on #of idle kthreads */
+} ____cacheline_aligned;
+
+#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
+
+/* the following are valid xpc_send() or xpc_send_notify() flags */
+#define XPC_WAIT 0 /* wait flag */
+#define XPC_NOWAIT 1 /* no wait flag */
+
+struct xpc_interface {
+ void (*connect) (int);
+ void (*disconnect) (int);
+ enum xp_retval (*send) (short, int, u32, void *, u16);
+ enum xp_retval (*send_notify) (short, int, u32, void *, u16,
+ xpc_notify_func, void *);
+ void (*received) (short, int, void *);
+ enum xp_retval (*partid_to_nasids) (short, void *);
+};
+
+extern struct xpc_interface xpc_interface;
+
+extern void xpc_set_interface(void (*)(int),
+ void (*)(int),
+ enum xp_retval (*)(short, int, u32, void *, u16),
+ enum xp_retval (*)(short, int, u32, void *, u16,
+ xpc_notify_func, void *),
+ void (*)(short, int, void *),
+ enum xp_retval (*)(short, void *));
+extern void xpc_clear_interface(void);
+
+extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
+ u16, u32, u32);
+extern void xpc_disconnect(int);
+
+static inline enum xp_retval
+xpc_send(short partid, int ch_number, u32 flags, void *payload,
+ u16 payload_size)
+{
+ return xpc_interface.send(partid, ch_number, flags, payload,
+ payload_size);
+}
+
+static inline enum xp_retval
+xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
+ u16 payload_size, xpc_notify_func func, void *key)
+{
+ return xpc_interface.send_notify(partid, ch_number, flags, payload,
+ payload_size, func, key);
+}
+
+static inline void
+xpc_received(short partid, int ch_number, void *payload)
+{
+ return xpc_interface.received(partid, ch_number, payload);
+}
+
+static inline enum xp_retval
+xpc_partid_to_nasids(short partid, void *nasids)
+{
+ return xpc_interface.partid_to_nasids(partid, nasids);
+}
+
+extern short xp_max_npartitions;
+extern short xp_partition_id;
+extern u8 xp_region_size;
+
+extern unsigned long (*xp_pa) (void *);
+extern unsigned long (*xp_socket_pa) (unsigned long);
+extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
+ size_t);
+extern int (*xp_cpu_to_nasid) (int);
+extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
+extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
+
+extern u64 xp_nofault_PIOR_target;
+extern int xp_nofault_PIOR(void *);
+extern int xp_error_PIOR(void);
+
+extern struct device *xp;
+extern enum xp_retval xp_init_sn2(void);
+extern enum xp_retval xp_init_uv(void);
+extern void xp_exit_sn2(void);
+extern void xp_exit_uv(void);
+
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
new file mode 100644
index 00000000..01be66d0
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -0,0 +1,286 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) base.
+ *
+ * XP provides a base from which its users can interact
+ * with XPC, yet not be dependent on XPC.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include "xp.h"
+
+/* define the XP debug device structures to be used with dev_dbg() et al */
+
+struct device_driver xp_dbg_name = {
+ .name = "xp"
+};
+
+struct device xp_dbg_subname = {
+ .init_name = "", /* set to "" */
+ .driver = &xp_dbg_name
+};
+
+struct device *xp = &xp_dbg_subname;
+
+/* max #of partitions possible */
+short xp_max_npartitions;
+EXPORT_SYMBOL_GPL(xp_max_npartitions);
+
+short xp_partition_id;
+EXPORT_SYMBOL_GPL(xp_partition_id);
+
+u8 xp_region_size;
+EXPORT_SYMBOL_GPL(xp_region_size);
+
+unsigned long (*xp_pa) (void *addr);
+EXPORT_SYMBOL_GPL(xp_pa);
+
+unsigned long (*xp_socket_pa) (unsigned long gpa);
+EXPORT_SYMBOL_GPL(xp_socket_pa);
+
+enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
+ const unsigned long src_gpa, size_t len);
+EXPORT_SYMBOL_GPL(xp_remote_memcpy);
+
+int (*xp_cpu_to_nasid) (int cpuid);
+EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
+
+enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
+ unsigned long size);
+EXPORT_SYMBOL_GPL(xp_expand_memprotect);
+enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
+ unsigned long size);
+EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
+
+/*
+ * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
+ * users of XPC.
+ */
+struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
+EXPORT_SYMBOL_GPL(xpc_registrations);
+
+/*
+ * Initialize the XPC interface to indicate that XPC isn't loaded.
+ */
+static enum xp_retval
+xpc_notloaded(void)
+{
+ return xpNotLoaded;
+}
+
+struct xpc_interface xpc_interface = {
+ (void (*)(int))xpc_notloaded,
+ (void (*)(int))xpc_notloaded,
+ (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
+ (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
+ void *))xpc_notloaded,
+ (void (*)(short, int, void *))xpc_notloaded,
+ (enum xp_retval(*)(short, void *))xpc_notloaded
+};
+EXPORT_SYMBOL_GPL(xpc_interface);
+
+/*
+ * XPC calls this when it (the XPC module) has been loaded.
+ */
+void
+xpc_set_interface(void (*connect) (int),
+ void (*disconnect) (int),
+ enum xp_retval (*send) (short, int, u32, void *, u16),
+ enum xp_retval (*send_notify) (short, int, u32, void *, u16,
+ xpc_notify_func, void *),
+ void (*received) (short, int, void *),
+ enum xp_retval (*partid_to_nasids) (short, void *))
+{
+ xpc_interface.connect = connect;
+ xpc_interface.disconnect = disconnect;
+ xpc_interface.send = send;
+ xpc_interface.send_notify = send_notify;
+ xpc_interface.received = received;
+ xpc_interface.partid_to_nasids = partid_to_nasids;
+}
+EXPORT_SYMBOL_GPL(xpc_set_interface);
+
+/*
+ * XPC calls this when it (the XPC module) is being unloaded.
+ */
+void
+xpc_clear_interface(void)
+{
+ xpc_interface.connect = (void (*)(int))xpc_notloaded;
+ xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+ xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
+ xpc_notloaded;
+ xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
+ u16, xpc_notify_func,
+ void *))xpc_notloaded;
+ xpc_interface.received = (void (*)(short, int, void *))
+ xpc_notloaded;
+ xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
+ xpc_notloaded;
+}
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
+
+/*
+ * Register for automatic establishment of a channel connection whenever
+ * a partition comes up.
+ *
+ * Arguments:
+ *
+ * ch_number - channel # to register for connection.
+ * func - function to call for asynchronous notification of channel
+ * state changes (i.e., connection, disconnection, error) and
+ * the arrival of incoming messages.
+ * key - pointer to optional user-defined value that gets passed back
+ * to the user on any callouts made to func.
+ * payload_size - size in bytes of the XPC message's payload area which
+ * contains a user-defined message. The user should make
+ * this large enough to hold their largest message.
+ * nentries - max #of XPC message entries a message queue can contain.
+ * The actual number, which is determined when a connection
+ *		   is established and may be less than requested, will be
+ * passed to the user via the xpConnected callout.
+ * assigned_limit - max number of kthreads allowed to be processing
+ * messages (per connection) at any given instant.
+ * idle_limit - max number of kthreads allowed to be idle at any given
+ * instant.
+ */
+enum xp_retval
+xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
+ u16 nentries, u32 assigned_limit, u32 idle_limit)
+{
+ struct xpc_registration *registration;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+ DBUG_ON(payload_size == 0 || nentries == 0);
+ DBUG_ON(func == NULL);
+ DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
+
+ if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
+ return xpPayloadTooBig;
+
+ registration = &xpc_registrations[ch_number];
+
+ if (mutex_lock_interruptible(&registration->mutex) != 0)
+ return xpInterrupted;
+
+ /* if XPC_CHANNEL_REGISTERED(ch_number) */
+ if (registration->func != NULL) {
+ mutex_unlock(&registration->mutex);
+ return xpAlreadyRegistered;
+ }
+
+ /* register the channel for connection */
+ registration->entry_size = XPC_MSG_SIZE(payload_size);
+ registration->nentries = nentries;
+ registration->assigned_limit = assigned_limit;
+ registration->idle_limit = idle_limit;
+ registration->key = key;
+ registration->func = func;
+
+ mutex_unlock(&registration->mutex);
+
+ xpc_interface.connect(ch_number);
+
+ return xpSuccess;
+}
+EXPORT_SYMBOL_GPL(xpc_connect);
+
+/*
+ * Remove the registration for automatic connection of the specified channel
+ * when a partition comes up.
+ *
+ * Before returning, xpc_disconnect() will wait until all connections on the
+ * specified channel have been closed/torn down, so the caller can be assured
+ * that they will not receive any more callouts from XPC to the function they
+ * registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ * ch_number - channel # to unregister.
+ */
+void
+xpc_disconnect(int ch_number)
+{
+ struct xpc_registration *registration;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+
+ registration = &xpc_registrations[ch_number];
+
+ /*
+	 * We've decided not to make this a mutex_lock_interruptible(), since
+	 * we figured XPC's users will just turn around and call
+	 * xpc_disconnect() again anyway, so we might as well wait, if need be.
+ */
+ mutex_lock(&registration->mutex);
+
+ /* if !XPC_CHANNEL_REGISTERED(ch_number) */
+ if (registration->func == NULL) {
+ mutex_unlock(&registration->mutex);
+ return;
+ }
+
+ /* remove the connection registration for the specified channel */
+ registration->func = NULL;
+ registration->key = NULL;
+ registration->nentries = 0;
+ registration->entry_size = 0;
+ registration->assigned_limit = 0;
+ registration->idle_limit = 0;
+
+ xpc_interface.disconnect(ch_number);
+
+ mutex_unlock(&registration->mutex);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(xpc_disconnect);
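+
+/*
+ * Illustrative sketch (not part of this driver) of how a kernel-level
+ * user of XPC would typically pair the two calls above. The channel
+ * number, sizes, limits and callback are hypothetical; the callback
+ * signature is that of xpc_channel_func declared in xp.h.
+ *
+ *	static void my_channel_func(enum xp_retval reason, short partid,
+ *				    int ch_number, void *data, void *key);
+ *
+ *	enum xp_retval ret = xpc_connect(0, my_channel_func, NULL,
+ *					 128, 64, 4, 2);
+ *	if (ret == xpSuccess) {
+ *		...
+ *		xpc_disconnect(0);
+ *	}
+ */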
+
+int __init
+xp_init(void)
+{
+ enum xp_retval ret;
+ int ch_number;
+
+ /* initialize the connection registration mutex */
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
+ mutex_init(&xpc_registrations[ch_number].mutex);
+
+ if (is_shub())
+ ret = xp_init_sn2();
+ else if (is_uv())
+ ret = xp_init_uv();
+ else
+		ret = xpSuccess;
+
+	if (ret != xpSuccess)
+		return -ENODEV;	/* module init must return a negative errno */
+
+ return 0;
+}
+
+module_init(xp_init);
+
+void __exit
+xp_exit(void)
+{
+ if (is_shub())
+ xp_exit_sn2();
+ else if (is_uv())
+ xp_exit_uv();
+}
+
+module_exit(xp_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition (XP) base");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/sgi-xp/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
new file mode 100644
index 00000000..e38d4331
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -0,0 +1,35 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * The xp_nofault_PIOR function takes a pointer to a remote PIO register
+ * and attempts to load and consume a value from it. This function
+ * will be registered as a nofault code block. In the event that the
+ * PIO read fails, the MCA handler will force the error to look
+ * corrected and vector to the xp_error_PIOR which will return an error.
+ *
+ * The definition of "consumption" and the time it takes for an MCA
+ * to surface is processor implementation specific. This code
+ * is sufficient on Itanium through the Montvale processor family.
+ * It may need to be adjusted for future processor implementations.
+ *
+ * extern int xp_nofault_PIOR(void *remote_register);
+ */
+
+ .global xp_nofault_PIOR
+xp_nofault_PIOR:
+ mov r8=r0 // Stage a success return value
+ ld8.acq r9=[r32];; // PIO Read the specified register
+ adds r9=1,r9;; // Add to force consumption
+ srlz.i;; // Allow time for MCA to surface
+ br.ret.sptk.many b0;; // Return success
+
+ .global xp_error_PIOR
+xp_error_PIOR:
+ mov r8=1 // Return value of 1
+ br.ret.sptk.many b0;; // Return failure
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
new file mode 100644
index 00000000..d8e463f8
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -0,0 +1,190 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) sn2-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/sn_sal.h>
+#include "xp.h"
+
+/*
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
+ */
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+
+u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
+
+/*
+ * Register a nofault code region which performs a cross-partition PIO read.
+ * If the PIO read times out, the MCA handler will consume the error and
+ * return to a kernel-provided instruction to indicate an error. This PIO read
+ * exists because it is guaranteed to time out if the destination is down
+ * (amo operations do not time out on at least some CPUs on Shubs <= v1.2,
+ * which unfortunately we have to work around).
+ */
+static enum xp_retval
+xp_register_nofault_code_sn2(void)
+{
+ int ret;
+ u64 func_addr;
+ u64 err_func_addr;
+
+ func_addr = *(u64 *)xp_nofault_PIOR;
+ err_func_addr = *(u64 *)xp_error_PIOR;
+ ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+ 1, 1);
+ if (ret != 0) {
+ dev_err(xp, "can't register nofault code, error=%d\n", ret);
+ return xpSalError;
+ }
+ /*
+	 * Set up the nofault PIO read target. (There is no special reason why
+ * SH_IPI_ACCESS was selected.)
+ */
+ if (is_shub1())
+ xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+ else if (is_shub2())
+ xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+
+ return xpSuccess;
+}
+
+static void
+xp_unregister_nofault_code_sn2(void)
+{
+ u64 func_addr = *(u64 *)xp_nofault_PIOR;
+ u64 err_func_addr = *(u64 *)xp_error_PIOR;
+
+ /* unregister the PIO read nofault code region */
+ (void)sn_register_nofault_code(func_addr, err_func_addr,
+ err_func_addr, 1, 0);
+}
+
+/*
+ * Convert a virtual memory address to a physical memory address.
+ */
+static unsigned long
+xp_pa_sn2(void *addr)
+{
+ return __pa(addr);
+}
+
+/*
+ * Convert a global physical to a socket physical address.
+ */
+static unsigned long
+xp_socket_pa_sn2(unsigned long gpa)
+{
+ return gpa;
+}
+
+/*
+ * Wrapper for bte_copy().
+ *
+ * dst_pa - physical address of the destination of the transfer.
+ * src_pa - physical address of the source of the transfer.
+ * len - number of bytes to transfer from source to destination.
+ *
+ * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
+ */
+static enum xp_retval
+xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
+ size_t len)
+{
+ bte_result_t ret;
+
+ ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+ if (ret == BTE_SUCCESS)
+ return xpSuccess;
+
+ if (is_shub2()) {
+ dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
+			"0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
+ src_pa, len);
+ } else {
+ dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
+			"src_pa=0x%016lx len=%ld\n", ret, dst_pa, src_pa, len);
+ }
+
+ return xpBteCopyError;
+}
+
+static int
+xp_cpu_to_nasid_sn2(int cpuid)
+{
+ return cpuid_to_nasid(cpuid);
+}
+
+static enum xp_retval
+xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+ u64 nasid_array = 0;
+ int ret;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+ return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+ u64 nasid_array = 0;
+ int ret;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+ return xpSuccess;
+}
+
+enum xp_retval
+xp_init_sn2(void)
+{
+ BUG_ON(!is_shub());
+
+ xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
+ xp_partition_id = sn_partition_id;
+ xp_region_size = sn_region_size;
+
+ xp_pa = xp_pa_sn2;
+ xp_socket_pa = xp_socket_pa_sn2;
+ xp_remote_memcpy = xp_remote_memcpy_sn2;
+ xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+ xp_expand_memprotect = xp_expand_memprotect_sn2;
+ xp_restrict_memprotect = xp_restrict_memprotect_sn2;
+
+ return xp_register_nofault_code_sn2();
+}
+
+void
+xp_exit_sn2(void)
+{
+ BUG_ON(!is_shub());
+
+ xp_unregister_nofault_code_sn2();
+}
+
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
new file mode 100644
index 00000000..a0d09327
--- /dev/null
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -0,0 +1,171 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition (XP) uv-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ *
+ */
+
+#include <linux/device.h>
+#include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/sn_sal.h>
+#endif
+#include "../sgi-gru/grukservices.h"
+#include "xp.h"
+
+/*
+ * Convert a virtual memory address to a physical memory address.
+ */
+static unsigned long
+xp_pa_uv(void *addr)
+{
+ return uv_gpa(addr);
+}
+
+/*
+ * Convert a global physical to socket physical address.
+ */
+static unsigned long
+xp_socket_pa_uv(unsigned long gpa)
+{
+ return uv_gpa_to_soc_phys_ram(gpa);
+}
+
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+ unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+ BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+ BUG_ON(len != 8);
+
+ ret = gru_read_gpa(dst_va, src_gpa);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+
+static enum xp_retval
+xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+
+ if (uv_gpa_in_mmr_space(src_gpa))
+ return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
+ ret = gru_copy_gpa(dst_gpa, src_gpa, len);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+static int
+xp_cpu_to_nasid_uv(int cpuid)
+{
+ /* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */
+ return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
+}
+
+static enum xp_retval
+xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+ int ret;
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
+ if (ret != BIOS_STATUS_SUCCESS) {
+ dev_err(xp, "uv_bios_change_memprotect(,, "
+ "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
+ return xpBiosError;
+ }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ u64 nasid_array;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+#else
+ #error not a supported configuration
+#endif
+ return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+ int ret;
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_change_memprotect(phys_addr, size,
+ UV_MEMPROT_RESTRICT_ACCESS);
+ if (ret != BIOS_STATUS_SUCCESS) {
+ dev_err(xp, "uv_bios_change_memprotect(,, "
+ "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
+ return xpBiosError;
+ }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ u64 nasid_array;
+
+ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+ &nasid_array);
+ if (ret != 0) {
+ dev_err(xp, "sn_change_memprotect(,, "
+ "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+ return xpSalError;
+ }
+#else
+ #error not a supported configuration
+#endif
+ return xpSuccess;
+}
+
+enum xp_retval
+xp_init_uv(void)
+{
+ BUG_ON(!is_uv());
+
+ xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
+ xp_partition_id = sn_partition_id;
+ xp_region_size = sn_region_size;
+
+ xp_pa = xp_pa_uv;
+ xp_socket_pa = xp_socket_pa_uv;
+ xp_remote_memcpy = xp_remote_memcpy_uv;
+ xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+ xp_expand_memprotect = xp_expand_memprotect_uv;
+ xp_restrict_memprotect = xp_restrict_memprotect_uv;
+
+ return xpSuccess;
+}
+
+void
+xp_exit_uv(void)
+{
+ BUG_ON(!is_uv());
+}
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
new file mode 100644
index 00000000..b94d5f76
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -0,0 +1,1004 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) structures and macros.
+ */
+
+#ifndef _DRIVERS_MISC_SGIXP_XPC_H
+#define _DRIVERS_MISC_SGIXP_XPC_H
+
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include "xp.h"
+
+/*
+ * XPC version numbers consist of a major and minor number. XPC can always
+ * talk to versions with the same major #, and never talk to versions with a
+ * different major #.
+ */
+#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
+#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
+#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
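+
+/*
+ * For example, _XPC_VERSION(3, 1) packs to 0x31; XPC_VERSION_MAJOR(0x31)
+ * then recovers 3 and XPC_VERSION_MINOR(0x31) recovers 1.
+ */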
+
+/* define frequency of the heartbeat and frequency how often it's checked */
+#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
+
+/* define the process name of HB checker and the CPU it is pinned to */
+#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
+#define XPC_HB_CHECK_CPU 0
+
+/* define the process name of the discovery thread */
+#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
+
+/*
+ * the reserved page
+ *
+ * SAL reserves one page of memory per partition for XPC. Though a full page
+ * in length (16384 bytes), its starting address is not page aligned, but it
+ * is cacheline aligned. The reserved page consists of the following:
+ *
+ * reserved page header
+ *
+ * The first two 64-byte cachelines of the reserved page contain the
+ * header (struct xpc_rsvd_page). Before SAL initialization has completed,
+ * SAL has set up the following fields of the reserved page header:
+ * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The
+ * other fields are set up by XPC. (xpc_rsvd_page points to the local
+ * partition's reserved page.)
+ *
+ * part_nasids mask
+ * mach_nasids mask
+ *
+ * SAL also sets up two bitmaps (or masks), one that reflects the actual
+ * nasids in this partition (part_nasids), and the other that reflects
+ * the actual nasids in the entire machine (mach_nasids). We're only
+ * interested in the even numbered nasids (which contain the processors
+ * and/or memory), so we only need half as many bits to represent the
+ * nasids. When mapping nasid to bit in a mask (or bit to nasid) be sure
+ * to either divide or multiply by 2. The part_nasids mask is located
+ * starting at the first cacheline following the reserved page header. The
+ * mach_nasids mask follows right after the part_nasids mask. The size in
+ * bytes of each mask is reflected by the reserved page header field
+ * 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
+ * and xpc_mach_nasids.)
+ *
+ * vars (ia64-sn2 only)
+ * vars part (ia64-sn2 only)
+ *
+ * Immediately following the mach_nasids mask are the XPC variables
+ * required by other partitions. First are those that are generic to all
+ * partitions (vars), followed on the next available cacheline by those
+ * which are partition specific (vars part). These are set up by XPC.
+ * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
+ *
+ * Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
+ * initialized.
+ */
+struct xpc_rsvd_page {
+ u64 SAL_signature; /* SAL: unique signature */
+ u64 SAL_version; /* SAL: version */
+ short SAL_partid; /* SAL: partition ID */
+ short max_npartitions; /* value of XPC_MAX_PARTITIONS */
+ u8 version;
+ u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
+ unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
+ union {
+ struct {
+ unsigned long vars_pa; /* phys addr */
+ } sn2;
+ struct {
+ unsigned long heartbeat_gpa; /* phys addr */
+ unsigned long activate_gru_mq_desc_gpa; /* phys addr */
+ } uv;
+ } sn;
+ u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */
+ u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
+};
+
+#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */
+
+/*
+ * Define the structures by which XPC variables can be exported to other
+ * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
+ */
+
+/*
+ * The following structure describes the partition generic variables
+ * needed by other partitions in order to properly initialize.
+ *
+ * struct xpc_vars version number also applies to struct xpc_vars_part.
+ * Changes to either structure and/or related functionality should be
+ * reflected by incrementing either the major or minor version numbers
+ * of struct xpc_vars.
+ */
+struct xpc_vars_sn2 {
+ u8 version;
+ u64 heartbeat;
+ DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
+ u64 heartbeat_offline; /* if 0, heartbeat should be changing */
+ int activate_IRQ_nasid;
+ int activate_IRQ_phys_cpuid;
+ unsigned long vars_part_pa;
+ unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
+ struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */
+};
+
+#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
+
+/*
+ * The following structure describes the per partition specific variables.
+ *
+ * An array of these structures, one per partition, will be defined. As a
+ * partition becomes active XPC will copy the array entry corresponding to
+ * itself from that partition. It is desirable that the size of this structure
+ * evenly divides into a 128-byte cacheline, such that none of the entries in
+ * this array crosses a 128-byte cacheline boundary. As it is now, each entry
+ * occupies 64 bytes.
+ */
+struct xpc_vars_part_sn2 {
+ u64 magic;
+
+ unsigned long openclose_args_pa; /* phys addr of open and close args */
+ unsigned long GPs_pa; /* physical address of Get/Put values */
+
+ unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
+
+ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
+ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
+
+ u8 nchannels; /* #of defined channels supported */
+
+ u8 reserved[23]; /* pad to a full 64 bytes */
+};
+
+/*
+ * The vars_part MAGIC numbers play a part in the first contact protocol.
+ *
+ * MAGIC1 indicates that the per partition specific variables for a remote
+ * partition have been initialized by this partition.
+ *
+ * MAGIC2 indicates that this partition has pulled the remote partition's
+ * per-partition variables that pertain to this partition.
+ */
+#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
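+
+/*
+ * For example, MAGIC1 is just the string 'XPCVARS\0' laid out in a
+ * little-endian u64: 'X' (0x58) is the least significant byte, so the
+ * bytes read 0x00 'S' 'R' 'A' 'V' 'C' 'P' 'X' from most to least
+ * significant, giving 0x0053524156435058.
+ */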
+
+/* the reserved page sizes and offsets */
+
+#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
+#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2))
+
+#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \
+ XPC_RP_HEADER_SIZE))
+#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
+ xpc_nasid_mask_nlongs)
+#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \
+ (XPC_RP_MACH_NASIDS(_rp) + \
+ xpc_nasid_mask_nlongs))
+
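+/*
+ * Read together, the macros above walk the reserved page in order. A
+ * minimal sketch (variable names illustrative):
+ *
+ *	struct xpc_rsvd_page *rp = xpc_rsvd_page;
+ *	unsigned long *part_nasids = XPC_RP_PART_NASIDS(rp);
+ *	unsigned long *mach_nasids = XPC_RP_MACH_NASIDS(rp);
+ *	struct xpc_vars_sn2 *vars = XPC_RP_VARS(rp);	/* ia64-sn2 only */
+ */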
+
+/*
+ * The following structure describes the partition's heartbeat info which
+ * will be periodically read by other partitions to determine whether this
+ * XPC is still 'alive'.
+ */
+struct xpc_heartbeat_uv {
+ unsigned long value;
+ unsigned long offline; /* if 0, heartbeat should be changing */
+};
+
+/*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+ void *address; /* address of GRU message queue */
+ unsigned int order; /* size of GRU message queue as a power of 2 */
+ int irq; /* irq raised when message is received in mq */
+ int mmr_blade; /* blade where watchlist was allocated from */
+ unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+ unsigned long mmr_value; /* value of irq mmr located on mmr_blade */
+	int watchlist_num;	/* watchlist number allocated by BIOS */
+ void *gru_mq_desc; /* opaque structure used by the GRU driver */
+};
+
+/*
+ * The activate_mq is used to send/receive GRU messages that affect XPC's
+ * partition active state and channel state. This is uv only.
+ */
+struct xpc_activate_mq_msghdr_uv {
+ unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
+ short partid; /* sender's partid */
+ u8 act_state; /* sender's act_state at time msg sent */
+ u8 type; /* message's type */
+ unsigned long rp_ts_jiffies; /* timestamp of sender's rp setup by XPC */
+};
+
+/* activate_mq defined message types */
+#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0
+
+#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 1
+#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 2
+
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 3
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 4
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 5
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 6
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV 7
+
+#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 8
+#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 9
+
+struct xpc_activate_mq_msg_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+};
+
+struct xpc_activate_mq_msg_activate_req_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ unsigned long rp_gpa;
+ unsigned long heartbeat_gpa;
+ unsigned long activate_gru_mq_desc_gpa;
+};
+
+struct xpc_activate_mq_msg_deactivate_req_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ enum xp_retval reason;
+};
+
+struct xpc_activate_mq_msg_chctl_closerequest_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+ enum xp_retval reason;
+};
+
+struct xpc_activate_mq_msg_chctl_closereply_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+};
+
+struct xpc_activate_mq_msg_chctl_openrequest_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+ short entry_size; /* size of notify_mq's GRU messages */
+ short local_nentries; /* ??? Is this needed? What is? */
+};
+
+struct xpc_activate_mq_msg_chctl_openreply_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+ short remote_nentries; /* ??? Is this needed? What is? */
+ short local_nentries; /* ??? Is this needed? What is? */
+ unsigned long notify_gru_mq_desc_gpa;
+};
+
+struct xpc_activate_mq_msg_chctl_opencomplete_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+};
+
+/*
+ * Functions registered by add_timer() or called by kernel_thread() only
+ * allow for a single 64-bit argument. The following macros can be used to
+ * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
+ * the passed argument.
+ */
+#define XPC_PACK_ARGS(_arg1, _arg2) \
+ ((((u64)_arg1) & 0xffffffff) | \
+ ((((u64)_arg2) & 0xffffffff) << 32))
+
+#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff)
+#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff)
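+
+/*
+ * Illustrative use (variable names are hypothetical): packing a partid
+ * and channel number into the single argument of a timer or kthread
+ * function and recovering them on the other side:
+ *
+ *	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
+ *	...
+ *	short partid = XPC_UNPACK_ARG1(args);
+ *	u16 ch_number = XPC_UNPACK_ARG2(args);
+ */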
+
+/*
+ * Define a Get/Put value pair (pointers) used with a message queue.
+ */
+struct xpc_gp_sn2 {
+ s64 get; /* Get value */
+ s64 put; /* Put value */
+};
+
+#define XPC_GP_SIZE \
+ L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS)
+
+/*
+ * Define a structure that contains arguments associated with opening and
+ * closing a channel.
+ */
+struct xpc_openclose_args {
+ u16 reason; /* reason why channel is closing */
+ u16 entry_size; /* sizeof each message entry */
+ u16 remote_nentries; /* #of message entries in remote msg queue */
+ u16 local_nentries; /* #of message entries in local msg queue */
+ unsigned long local_msgqueue_pa; /* phys addr of local message queue */
+};
+
+#define XPC_OPENCLOSE_ARGS_SIZE \
+ L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \
+ XPC_MAX_NCHANNELS)
+
+
+/*
+ * Structures to define a fifo singly-linked list.
+ */
+
+struct xpc_fifo_entry_uv {
+ struct xpc_fifo_entry_uv *next;
+};
+
+struct xpc_fifo_head_uv {
+ struct xpc_fifo_entry_uv *first;
+ struct xpc_fifo_entry_uv *last;
+ spinlock_t lock;
+ int n_entries;
+};
+
+/*
+ * Define a sn2 styled message.
+ *
+ * A user-defined message resides in the payload area. The max size of the
+ * payload is defined by the user via xpc_connect().
+ *
+ * The size of a message entry (within a message queue) must be a 128-byte
+ * cacheline sized multiple in order to facilitate the BTE transfer of messages
+ * from one message queue to another.
+ */
+struct xpc_msg_sn2 {
+ u8 flags; /* FOR XPC INTERNAL USE ONLY */
+ u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
+ s64 number; /* FOR XPC INTERNAL USE ONLY */
+
+ u64 payload; /* user defined portion of message */
+};
+
+/* struct xpc_msg_sn2 flags */
+
+#define XPC_M_SN2_DONE 0x01 /* msg has been received/consumed */
+#define XPC_M_SN2_READY 0x02 /* msg is ready to be sent */
+#define XPC_M_SN2_INTERRUPT 0x04 /* send interrupt when msg consumed */
+
+/*
+ * The format of a uv XPC notify_mq GRU message is as follows:
+ *
+ * A user-defined message resides in the payload area. The max size of the
+ * payload is defined by the user via xpc_connect().
+ *
+ * The size of a message (payload and header) sent via the GRU must be either 1
+ * or 2 GRU_CACHE_LINE_BYTES in length.
+ */
+
+struct xpc_notify_mq_msghdr_uv {
+ union {
+ unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
+ struct xpc_fifo_entry_uv next; /* FOR XPC INTERNAL USE ONLY */
+ } u;
+ short partid; /* FOR XPC INTERNAL USE ONLY */
+ u8 ch_number; /* FOR XPC INTERNAL USE ONLY */
+ u8 size; /* FOR XPC INTERNAL USE ONLY */
+ unsigned int msg_slot_number; /* FOR XPC INTERNAL USE ONLY */
+};
+
+struct xpc_notify_mq_msg_uv {
+ struct xpc_notify_mq_msghdr_uv hdr;
+ unsigned long payload;
+};
+
+/*
+ * Define sn2's notify entry.
+ *
+ * This is used to notify a message's sender that their message was received
+ * and consumed by the intended recipient.
+ */
+struct xpc_notify_sn2 {
+ u8 type; /* type of notification */
+
+ /* the following two fields are only used if type == XPC_N_CALL */
+ xpc_notify_func func; /* user's notify function */
+ void *key; /* pointer to user's key */
+};
+
+/* struct xpc_notify_sn2 type of notification */
+
+#define XPC_N_CALL 0x01 /* notify function provided by user */
+
+/*
+ * Define uv's version of the notify entry. It is additionally used to
+ * allocate a msg slot on the remote partition into which a sent message is
+ * copied.
+ */
+struct xpc_send_msg_slot_uv {
+ struct xpc_fifo_entry_uv next;
+ unsigned int msg_slot_number;
+ xpc_notify_func func; /* user's notify function */
+ void *key; /* pointer to user's key */
+};
+
+/*
+ * Define the structure that manages all the state required by a channel. In
+ * particular, it is used to manage the messages sent across the channel.
+ *
+ * This structure is private to a partition, and is NOT shared across the
+ * partition boundary.
+ *
+ * There is an array of these structures for each remote partition. It is
+ * allocated at the time a partition becomes active. The array contains one
+ * of these structures for each potential channel connection to that partition.
+ */
+
+/*
+ * The following is sn2 only.
+ *
+ * Each channel structure manages two message queues (circular buffers).
+ * They are allocated at the time a channel connection is made. One of
+ * these message queues (local_msgqueue) holds the locally created messages
+ * that are destined for the remote partition. The other of these message
+ * queues (remote_msgqueue) is a locally cached copy of the remote partition's
+ * own local_msgqueue.
+ *
+ * The following is a description of the Get/Put pointers used to manage these
+ * two message queues. Consider the local_msgqueue to be on one partition
+ * and the remote_msgqueue to be its cached copy on another partition. A
+ * description of what each of the lettered areas contains is included.
+ *
+ *
+ * local_msgqueue remote_msgqueue
+ *
+ * |/////////| |/////////|
+ * w_remote_GP.get --> +---------+ |/////////|
+ * | F | |/////////|
+ * remote_GP.get --> +---------+ +---------+ <-- local_GP->get
+ * | | | |
+ * | | | E |
+ * | | | |
+ * | | +---------+ <-- w_local_GP.get
+ * | B | |/////////|
+ * | | |////D////|
+ * | | |/////////|
+ * | | +---------+ <-- w_remote_GP.put
+ * | | |////C////|
+ * local_GP->put --> +---------+ +---------+ <-- remote_GP.put
+ * | | |/////////|
+ * | A | |/////////|
+ * | | |/////////|
+ * w_local_GP.put --> +---------+ |/////////|
+ * |/////////| |/////////|
+ *
+ *
+ * ( remote_GP.[get|put] are cached copies of the remote
+ * partition's local_GP->[get|put], and thus their values can
+ * lag behind their counterparts on the remote partition. )
+ *
+ *
+ * A - Messages that have been allocated, but have not yet been sent to the
+ * remote partition.
+ *
+ * B - Messages that have been sent, but have not yet been acknowledged by the
+ * remote partition as having been received.
+ *
+ * C - Area that needs to be prepared for the copying of sent messages, by
+ * the clearing of the message flags of any previously received messages.
+ *
+ * D - Area into which sent messages are to be copied from the remote
+ * partition's local_msgqueue and then delivered to their intended
+ * recipients. [ To allow for a multi-message copy, another pointer
+ * (next_msg_to_pull) has been added to keep track of the next message
+ * number needing to be copied (pulled). It chases after w_remote_GP.put.
+ * Any messages lying between w_local_GP.get and next_msg_to_pull have
+ * been copied and are ready to be delivered. ]
+ *
+ * E - Messages that have been copied and delivered, but have not yet been
+ * acknowledged by the recipient as having been received.
+ *
+ * F - Messages that have been acknowledged, but XPC has not yet notified the
+ * sender that the message was received by its intended recipient.
+ * This is also an area that needs to be prepared for the allocating of
+ * new messages, by the clearing of the message flags of the acknowledged
+ * messages.
+ */
+
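+/*
+ * A minimal sketch (inferred from the diagram above, not quoted from the
+ * driver) of the room check the Get/Put pointers imply: the local message
+ * queue can accept another allocation only while the working Put pointer
+ * is less than a full queue length ahead of the cached remote Get pointer.
+ * allocate_entry() is illustrative:
+ *
+ *	if (ch->sn.sn2.w_local_GP.put - ch->sn.sn2.w_remote_GP.get <
+ *	    ch->local_nentries)
+ *		allocate_entry(ch);	// room in local_msgqueue
+ */
+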
+struct xpc_channel_sn2 {
+ struct xpc_openclose_args *local_openclose_args; /* args passed on */
+ /* opening or closing of channel */
+
+ void *local_msgqueue_base; /* base address of kmalloc'd space */
+ struct xpc_msg_sn2 *local_msgqueue; /* local message queue */
+ void *remote_msgqueue_base; /* base address of kmalloc'd space */
+ struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */
+ /* partition's local message queue */
+ unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
+ /* local message queue */
+
+ struct xpc_notify_sn2 *notify_queue;/* notify queue for messages sent */
+
+ /* various flavors of local and remote Get/Put values */
+
+ struct xpc_gp_sn2 *local_GP; /* local Get/Put values */
+ struct xpc_gp_sn2 remote_GP; /* remote Get/Put values */
+ struct xpc_gp_sn2 w_local_GP; /* working local Get/Put values */
+ struct xpc_gp_sn2 w_remote_GP; /* working remote Get/Put values */
+ s64 next_msg_to_pull; /* Put value of next msg to pull */
+
+ struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
+};
+
+struct xpc_channel_uv {
+ void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */
+ /* gru mq descriptor */
+
+ struct xpc_send_msg_slot_uv *send_msg_slots;
+ void *recv_msg_slots; /* each slot will hold a xpc_notify_mq_msg_uv */
+ /* structure plus the user's payload */
+
+ struct xpc_fifo_head_uv msg_slot_free_list;
+ struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */
+};
+
+struct xpc_channel {
+ short partid; /* ID of remote partition connected */
+ spinlock_t lock; /* lock for updating this structure */
+ unsigned int flags; /* general flags */
+
+ enum xp_retval reason; /* reason why channel is disconnect'g */
+ int reason_line; /* line# disconnect initiated from */
+
+ u16 number; /* channel # */
+
+ u16 entry_size; /* sizeof each msg entry */
+ u16 local_nentries; /* #of msg entries in local msg queue */
+ u16 remote_nentries; /* #of msg entries in remote msg queue */
+
+ atomic_t references; /* #of external references to queues */
+
+ atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
+ wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
+
+ u8 delayed_chctl_flags; /* chctl flags received, but delayed */
+ /* action until channel disconnected */
+
+ atomic_t n_to_notify; /* #of msg senders to notify */
+
+ xpc_channel_func func; /* user's channel function */
+ void *key; /* pointer to user's key */
+
+ struct completion wdisconnect_wait; /* wait for channel disconnect */
+
+ /* kthread management related fields */
+
+ atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
+ u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
+ atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
+ u32 kthreads_idle_limit; /* limit on #of kthreads idle */
+ atomic_t kthreads_active; /* #of kthreads actively working */
+
+ wait_queue_head_t idle_wq; /* idle kthread wait queue */
+
+ union {
+ struct xpc_channel_sn2 sn2;
+ struct xpc_channel_uv uv;
+ } sn;
+
+} ____cacheline_aligned;
+
+/* struct xpc_channel flags */
+
+#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
+
+#define XPC_C_ROPENCOMPLETE 0x00000002 /* remote open channel complete */
+#define XPC_C_OPENCOMPLETE 0x00000004 /* local open channel complete */
+#define XPC_C_ROPENREPLY 0x00000008 /* remote open channel reply */
+#define XPC_C_OPENREPLY 0x00000010 /* local open channel reply */
+#define XPC_C_ROPENREQUEST 0x00000020 /* remote open channel request */
+#define XPC_C_OPENREQUEST 0x00000040 /* local open channel request */
+
+#define XPC_C_SETUP 0x00000080 /* channel's msgqueues are alloc'd */
+#define XPC_C_CONNECTEDCALLOUT 0x00000100 /* connected callout initiated */
+#define XPC_C_CONNECTEDCALLOUT_MADE \
+ 0x00000200 /* connected callout completed */
+#define XPC_C_CONNECTED 0x00000400 /* local channel is connected */
+#define XPC_C_CONNECTING 0x00000800 /* channel is being connected */
+
+#define XPC_C_RCLOSEREPLY 0x00001000 /* remote close channel reply */
+#define XPC_C_CLOSEREPLY 0x00002000 /* local close channel reply */
+#define XPC_C_RCLOSEREQUEST 0x00004000 /* remote close channel request */
+#define XPC_C_CLOSEREQUEST 0x00008000 /* local close channel request */
+
+#define XPC_C_DISCONNECTED 0x00010000 /* channel is disconnected */
+#define XPC_C_DISCONNECTING 0x00020000 /* channel is being disconnected */
+#define XPC_C_DISCONNECTINGCALLOUT \
+ 0x00040000 /* disconnecting callout initiated */
+#define XPC_C_DISCONNECTINGCALLOUT_MADE \
+ 0x00080000 /* disconnecting callout completed */
+#define XPC_C_WDISCONNECT 0x00100000 /* waiting for channel disconnect */
+
+/*
+ * The channel control flags (chctl) union consists of a 64-bit variable which
+ * is divided up into eight bytes, ordered from right to left. Byte zero
+ * pertains to channel 0, byte one to channel 1, and so on. Each channel's byte
+ * can have one or more of the chctl flags set in it.
+ */
+
+union xpc_channel_ctl_flags {
+ u64 all_flags;
+ u8 flags[XPC_MAX_NCHANNELS];
+};
+
+/* chctl flags */
+#define XPC_CHCTL_CLOSEREQUEST 0x01
+#define XPC_CHCTL_CLOSEREPLY 0x02
+#define XPC_CHCTL_OPENREQUEST 0x04
+#define XPC_CHCTL_OPENREPLY 0x08
+#define XPC_CHCTL_OPENCOMPLETE 0x10
+#define XPC_CHCTL_MSGREQUEST 0x20
+
+#define XPC_OPENCLOSE_CHCTL_FLAGS \
+ (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
+ XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | \
+ XPC_CHCTL_OPENCOMPLETE)
+#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST
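+
+/*
+ * For example, posting an open request for channel 2 sets bit 0x04
+ * (XPC_CHCTL_OPENREQUEST) in byte 2 of the union; the driver updates
+ * these bytes under the owning partition's chctl_lock:
+ *
+ *	part->chctl.flags[2] |= XPC_CHCTL_OPENREQUEST;
+ */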
+
+static inline int
+xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
+{
+ int ch_number;
+
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
+ if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS)
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
+{
+ int ch_number;
+
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
+ if (chctl->flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Manage channels on a partition basis. There is one of these structures
+ * for each partition (a partition will never utilize the structure that
+ * represents itself).
+ */
+
+struct xpc_partition_sn2 {
+ unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
+ int activate_IRQ_nasid; /* active partition's act/deact nasid */
+ int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */
+
+ unsigned long remote_vars_pa; /* phys addr of partition's vars */
+ unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
+ u8 remote_vars_version; /* version# of partition's vars */
+
+ void *local_GPs_base; /* base address of kmalloc'd space */
+ struct xpc_gp_sn2 *local_GPs; /* local Get/Put values */
+ void *remote_GPs_base; /* base address of kmalloc'd space */
+ struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */
+ /* Get/Put values */
+ unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
+ /* Get/Put values */
+
+ void *local_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *local_openclose_args; /* local's args */
+ unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
+
+ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
+ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
+ char notify_IRQ_owner[8]; /* notify IRQ's owner's name */
+
+ struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
+ struct amo *local_chctl_amo_va; /* address of chctl flags' amo */
+
+ struct timer_list dropped_notify_IRQ_timer; /* dropped IRQ timer */
+};
+
+struct xpc_partition_uv {
+ unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */
+ struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */
+ /* partition's heartbeat */
+ unsigned long activate_gru_mq_desc_gpa; /* phys addr of parititon's */
+ /* activate mq's gru mq */
+ /* descriptor */
+ void *cached_activate_gru_mq_desc; /* cached copy of partition's */
+ /* activate mq's gru mq descriptor */
+ struct mutex cached_activate_gru_mq_desc_mutex;
+ spinlock_t flags_lock; /* protect updating of flags */
+ unsigned int flags; /* general flags */
+ u8 remote_act_state; /* remote partition's act_state */
+ u8 act_state_req; /* act_state request from remote partition */
+ enum xp_retval reason; /* reason for deactivate act_state request */
+};
+
+/* struct xpc_partition_uv flags */
+
+#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000001
+#define XPC_P_ENGAGED_UV 0x00000002
+
+/* struct xpc_partition_uv act_state change requests */
+
+#define XPC_P_ASR_ACTIVATE_UV 0x01
+#define XPC_P_ASR_REACTIVATE_UV 0x02
+#define XPC_P_ASR_DEACTIVATE_UV 0x03
+
+struct xpc_partition {
+
+ /* XPC HB infrastructure */
+
+ u8 remote_rp_version; /* version# of partition's rsvd pg */
+ unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
+ unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */
+ u64 last_heartbeat; /* HB at last read */
+ u32 activate_IRQ_rcvd; /* IRQs since activation */
+ spinlock_t act_lock; /* protect updating of act_state */
+ u8 act_state; /* from XPC HB viewpoint */
+ enum xp_retval reason; /* reason partition is deactivating */
+ int reason_line; /* line# deactivation initiated from */
+
+ unsigned long disengage_timeout; /* timeout in jiffies */
+ struct timer_list disengage_timer;
+
+ /* XPC infrastructure referencing and teardown control */
+
+ u8 setup_state; /* infrastructure setup state */
+ wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
+ atomic_t references; /* #of references to infrastructure */
+
+ u8 nchannels; /* #of defined channels supported */
+ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
+ struct xpc_channel *channels; /* array of channel structures */
+
+	/* fields used for managing channel availability and activity */
+
+ union xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */
+ spinlock_t chctl_lock; /* chctl flags lock */
+
+ void *remote_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
+ /* args */
+
+ /* channel manager related fields */
+
+ atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
+ wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+
+ union {
+ struct xpc_partition_sn2 sn2;
+ struct xpc_partition_uv uv;
+ } sn;
+
+} ____cacheline_aligned;
+
+struct xpc_arch_operations {
+ int (*setup_partitions) (void);
+ void (*teardown_partitions) (void);
+ void (*process_activate_IRQ_rcvd) (void);
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *);
+ int (*setup_rsvd_page) (struct xpc_rsvd_page *);
+
+ void (*allow_hb) (short);
+ void (*disallow_hb) (short);
+ void (*disallow_all_hbs) (void);
+ void (*increment_heartbeat) (void);
+ void (*offline_heartbeat) (void);
+ void (*online_heartbeat) (void);
+ void (*heartbeat_init) (void);
+ void (*heartbeat_exit) (void);
+ enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *);
+
+ void (*request_partition_activation) (struct xpc_rsvd_page *,
+ unsigned long, int);
+ void (*request_partition_reactivation) (struct xpc_partition *);
+ void (*request_partition_deactivation) (struct xpc_partition *);
+ void (*cancel_partition_deactivation_request) (struct xpc_partition *);
+ enum xp_retval (*setup_ch_structures) (struct xpc_partition *);
+ void (*teardown_ch_structures) (struct xpc_partition *);
+
+ enum xp_retval (*make_first_contact) (struct xpc_partition *);
+
+ u64 (*get_chctl_all_flags) (struct xpc_partition *);
+ void (*send_chctl_closerequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_closereply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openrequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openreply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_opencomplete) (struct xpc_channel *, unsigned long *);
+ void (*process_msg_chctl_flags) (struct xpc_partition *, int);
+
+ enum xp_retval (*save_remote_msgqueue_pa) (struct xpc_channel *,
+ unsigned long);
+
+ enum xp_retval (*setup_msg_structures) (struct xpc_channel *);
+ void (*teardown_msg_structures) (struct xpc_channel *);
+
+ void (*indicate_partition_engaged) (struct xpc_partition *);
+ void (*indicate_partition_disengaged) (struct xpc_partition *);
+ void (*assume_partition_disengaged) (short);
+ int (*partition_engaged) (short);
+ int (*any_partition_engaged) (void);
+
+ int (*n_of_deliverable_payloads) (struct xpc_channel *);
+ enum xp_retval (*send_payload) (struct xpc_channel *, u32, void *,
+ u16, u8, xpc_notify_func, void *);
+ void *(*get_deliverable_payload) (struct xpc_channel *);
+ void (*received_payload) (struct xpc_channel *, void *);
+ void (*notify_senders_of_disconnect) (struct xpc_channel *);
+};
+
+/* struct xpc_partition act_state values (for XPC HB) */
+
+#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
+#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */
+#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */
+#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
+#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */
+
+#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
+ xpc_deactivate_partition(__LINE__, (_p), (_reason))
+
+/* struct xpc_partition setup_state values */
+
+#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */
+#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */
+#define XPC_P_SS_WTEARDOWN	0x02	/* waiting to tear down infrastructure */
+#define XPC_P_SS_TORNDOWN	0x03	/* infrastructure is torn down */
+
+/*
+ * struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
+ * following interval (in jiffies) before checking for dropped notify IRQs.
+ * These can occur whenever an IRQ's associated amo write doesn't complete
+ * until after the IRQ was received.
+ */
+#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL (0.25 * HZ)
+
+/* number of seconds to wait for other partitions to disengage */
+#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90
+
+/* interval in seconds to print 'waiting deactivation' messages */
+#define XPC_DEACTIVATE_PRINTMSG_INTERVAL 10
+
+#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0]))
+
+/* found in xp_main.c */
+extern struct xpc_registration xpc_registrations[];
+
+/* found in xpc_main.c */
+extern struct device *xpc_part;
+extern struct device *xpc_chan;
+extern struct xpc_arch_operations xpc_arch_ops;
+extern int xpc_disengage_timelimit;
+extern int xpc_disengage_timedout;
+extern int xpc_activate_IRQ_rcvd;
+extern spinlock_t xpc_activate_IRQ_rcvd_lock;
+extern wait_queue_head_t xpc_activate_IRQ_wq;
+extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
+extern void xpc_activate_partition(struct xpc_partition *);
+extern void xpc_activate_kthreads(struct xpc_channel *, int);
+extern void xpc_create_kthreads(struct xpc_channel *, int, int);
+extern void xpc_disconnect_wait(int);
+
+/* found in xpc_sn2.c */
+extern int xpc_init_sn2(void);
+extern void xpc_exit_sn2(void);
+
+/* found in xpc_uv.c */
+extern int xpc_init_uv(void);
+extern void xpc_exit_uv(void);
+
+/* found in xpc_partition.c */
+extern int xpc_exiting;
+extern int xpc_nasid_mask_nlongs;
+extern struct xpc_rsvd_page *xpc_rsvd_page;
+extern unsigned long *xpc_mach_nasids;
+extern struct xpc_partition *xpc_partitions;
+extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
+extern int xpc_setup_rsvd_page(void);
+extern void xpc_teardown_rsvd_page(void);
+extern int xpc_identify_activate_IRQ_sender(void);
+extern int xpc_partition_disengaged(struct xpc_partition *);
+extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
+extern void xpc_mark_partition_inactive(struct xpc_partition *);
+extern void xpc_discovery(void);
+extern enum xp_retval xpc_get_remote_rp(int, unsigned long *,
+ struct xpc_rsvd_page *,
+ unsigned long *);
+extern void xpc_deactivate_partition(const int, struct xpc_partition *,
+ enum xp_retval);
+extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
+
+/* found in xpc_channel.c */
+extern void xpc_initiate_connect(int);
+extern void xpc_initiate_disconnect(int);
+extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *);
+extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16);
+extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16,
+ xpc_notify_func, void *);
+extern void xpc_initiate_received(short, int, void *);
+extern void xpc_process_sent_chctl_flags(struct xpc_partition *);
+extern void xpc_connected_callout(struct xpc_channel *);
+extern void xpc_deliver_payload(struct xpc_channel *);
+extern void xpc_disconnect_channel(const int, struct xpc_channel *,
+ enum xp_retval, unsigned long *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
+
+static inline void
+xpc_wakeup_channel_mgr(struct xpc_partition *part)
+{
+ if (atomic_inc_return(&part->channel_mgr_requests) == 1)
+ wake_up(&part->channel_mgr_wq);
+}
+
+/*
+ * These next two inlines are used to keep us from tearing down a channel's
+ * msg queues while a thread may be referencing them.
+ */
+static inline void
+xpc_msgqueue_ref(struct xpc_channel *ch)
+{
+ atomic_inc(&ch->references);
+}
+
+static inline void
+xpc_msgqueue_deref(struct xpc_channel *ch)
+{
+ s32 refs = atomic_dec_return(&ch->references);
+
+ DBUG_ON(refs < 0);
+ if (refs == 0)
+ xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
+}
+
+#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
+ xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
+
+/*
+ * These two inlines are used to keep us from tearing down a partition's
+ * setup infrastructure while a thread may be referencing it.
+ */
+static inline void
+xpc_part_deref(struct xpc_partition *part)
+{
+ s32 refs = atomic_dec_return(&part->references);
+
+ DBUG_ON(refs < 0);
+ if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
+ wake_up(&part->teardown_wq);
+}
+
+static inline int
+xpc_part_ref(struct xpc_partition *part)
+{
+ int setup;
+
+ atomic_inc(&part->references);
+ setup = (part->setup_state == XPC_P_SS_SETUP);
+ if (!setup)
+ xpc_part_deref(part);
+
+ return setup;
+}
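+
+/*
+ * Typical use of the two inlines above (a sketch; do_work() is
+ * illustrative):
+ *
+ *	if (xpc_part_ref(part)) {
+ *		do_work(part);	// infrastructure can't be torn down here
+ *		xpc_part_deref(part);
+ *	}
+ */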
+
+/*
+ * The following macro is to be used for the setting of the reason and
+ * reason_line fields in both the struct xpc_channel and struct xpc_partition
+ * structures.
+ */
+#define XPC_SET_REASON(_p, _reason, _line) \
+ { \
+ (_p)->reason = _reason; \
+ (_p)->reason_line = _line; \
+ }
+
+#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
new file mode 100644
index 00000000..652593fc
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -0,0 +1,1011 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) channel support.
+ *
+ * This is the part of XPC that manages the channels and
+ * sends/receives messages across them to/from other partitions.
+ *
+ */
+
+#include <linux/device.h>
+#include "xpc.h"
+
+/*
+ * Process a connect message from a remote partition.
+ *
+ * Note: xpc_process_connect() is expecting to be called with the
+ * spin_lock_irqsave held and will leave it locked upon return.
+ */
+static void
+xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ enum xp_retval ret;
+
+ DBUG_ON(!spin_is_locked(&ch->lock));
+
+ if (!(ch->flags & XPC_C_OPENREQUEST) ||
+ !(ch->flags & XPC_C_ROPENREQUEST)) {
+ /* nothing more to do for now */
+ return;
+ }
+ DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
+
+ if (!(ch->flags & XPC_C_SETUP)) {
+ spin_unlock_irqrestore(&ch->lock, *irq_flags);
+ ret = xpc_arch_ops.setup_msg_structures(ch);
+ spin_lock_irqsave(&ch->lock, *irq_flags);
+
+ if (ret != xpSuccess)
+ XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
+ else
+ ch->flags |= XPC_C_SETUP;
+
+ if (ch->flags & XPC_C_DISCONNECTING)
+ return;
+ }
+
+ if (!(ch->flags & XPC_C_OPENREPLY)) {
+ ch->flags |= XPC_C_OPENREPLY;
+ xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
+ }
+
+ if (!(ch->flags & XPC_C_ROPENREPLY))
+ return;
+
+ if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
+ ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
+ xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
+ }
+
+ if (!(ch->flags & XPC_C_ROPENCOMPLETE))
+ return;
+
+ dev_info(xpc_chan, "channel %d to partition %d connected\n",
+ ch->number, ch->partid);
+
+ ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
+}
+
+/*
+ * spin_lock_irqsave() is expected to be held on entry.
+ */
+static void
+xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_partition *part = &xpc_partitions[ch->partid];
+ u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
+
+ DBUG_ON(!spin_is_locked(&ch->lock));
+
+ if (!(ch->flags & XPC_C_DISCONNECTING))
+ return;
+
+ DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
+
+ /* make sure all activity has settled down first */
+
+ if (atomic_read(&ch->kthreads_assigned) > 0 ||
+ atomic_read(&ch->references) > 0) {
+ return;
+ }
+ DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+ !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ /* can't proceed until the other side disengages from us */
+ if (xpc_arch_ops.partition_engaged(ch->partid))
+ return;
+
+ } else {
+
+		/* as long as the other side is up, do the full protocol */
+
+ if (!(ch->flags & XPC_C_RCLOSEREQUEST))
+ return;
+
+ if (!(ch->flags & XPC_C_CLOSEREPLY)) {
+ ch->flags |= XPC_C_CLOSEREPLY;
+ xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
+ }
+
+ if (!(ch->flags & XPC_C_RCLOSEREPLY))
+ return;
+ }
+
+ /* wake those waiting for notify completion */
+ if (atomic_read(&ch->n_to_notify) > 0) {
+ /* we do callout while holding ch->lock, callout can't block */
+ xpc_arch_ops.notify_senders_of_disconnect(ch);
+ }
+
+ /* both sides are disconnected now */
+
+ if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
+ spin_unlock_irqrestore(&ch->lock, *irq_flags);
+ xpc_disconnect_callout(ch, xpDisconnected);
+ spin_lock_irqsave(&ch->lock, *irq_flags);
+ }
+
+ DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
+
+ /* it's now safe to free the channel's message queues */
+ xpc_arch_ops.teardown_msg_structures(ch);
+
+ ch->func = NULL;
+ ch->key = NULL;
+ ch->entry_size = 0;
+ ch->local_nentries = 0;
+ ch->remote_nentries = 0;
+ ch->kthreads_assigned_limit = 0;
+ ch->kthreads_idle_limit = 0;
+
+ /*
+ * Mark the channel disconnected and clear all other flags, including
+ * XPC_C_SETUP (because of call to
+ * xpc_arch_ops.teardown_msg_structures()) but not including
+ * XPC_C_WDISCONNECT (if it was set).
+ */
+ ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
+
+ atomic_dec(&part->nchannels_active);
+
+ if (channel_was_connected) {
+ dev_info(xpc_chan, "channel %d to partition %d disconnected, "
+ "reason=%d\n", ch->number, ch->partid, ch->reason);
+ }
+
+ if (ch->flags & XPC_C_WDISCONNECT) {
+ /* we won't lose the CPU since we're holding ch->lock */
+ complete(&ch->wdisconnect_wait);
+ } else if (ch->delayed_chctl_flags) {
+ if (part->act_state != XPC_P_AS_DEACTIVATING) {
+ /* time to take action on any delayed chctl flags */
+ spin_lock(&part->chctl_lock);
+ part->chctl.flags[ch->number] |=
+ ch->delayed_chctl_flags;
+ spin_unlock(&part->chctl_lock);
+ }
+ ch->delayed_chctl_flags = 0;
+ }
+}
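+
+/*
+ * As with connect, the disconnect handshake above is symmetric while both
+ * sides are up: each side sends a CLOSEREQUEST and answers the remote's
+ * request with a CLOSEREPLY. Only after all four flags have been exchanged
+ * (or the peer has deactivated) is the channel marked XPC_C_DISCONNECTED.
+ */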
+
+/*
+ * Process a change in the channel's remote connection state.
+ */
+static void
+xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
+ u8 chctl_flags)
+{
+ unsigned long irq_flags;
+ struct xpc_openclose_args *args =
+ &part->remote_openclose_args[ch_number];
+ struct xpc_channel *ch = &part->channels[ch_number];
+ enum xp_retval reason;
+ enum xp_retval ret;
+ int create_kthread = 0;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+again:
+
+ if ((ch->flags & XPC_C_DISCONNECTED) &&
+ (ch->flags & XPC_C_WDISCONNECT)) {
+ /*
+ * Delay processing chctl flags until thread waiting disconnect
+ * has had a chance to see that the channel is disconnected.
+ */
+ ch->delayed_chctl_flags |= chctl_flags;
+ goto out;
+ }
+
+ if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
+ "from partid=%d, channel=%d\n", args->reason,
+ ch->partid, ch->number);
+
+ /*
+		 * If RCLOSEREQUEST is set, we're probably waiting for
+		 * RCLOSEREPLY. We should find it, and possibly a ROPENREQUEST,
+		 * packed with this RCLOSEREQUEST in the chctl_flags.
+ */
+
+ if (ch->flags & XPC_C_RCLOSEREQUEST) {
+ DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
+ DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
+ DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
+ DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
+
+ DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
+ chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
+ ch->flags |= XPC_C_RCLOSEREPLY;
+
+ /* both sides have finished disconnecting */
+ xpc_process_disconnect(ch, &irq_flags);
+ DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
+ goto again;
+ }
+
+ if (ch->flags & XPC_C_DISCONNECTED) {
+ if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
+ if (part->chctl.flags[ch_number] &
+ XPC_CHCTL_OPENREQUEST) {
+
+ DBUG_ON(ch->delayed_chctl_flags != 0);
+ spin_lock(&part->chctl_lock);
+ part->chctl.flags[ch_number] |=
+ XPC_CHCTL_CLOSEREQUEST;
+ spin_unlock(&part->chctl_lock);
+ }
+ goto out;
+ }
+
+ XPC_SET_REASON(ch, 0, 0);
+ ch->flags &= ~XPC_C_DISCONNECTED;
+
+ atomic_inc(&part->nchannels_active);
+ ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
+ }
+
+ chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
+ XPC_CHCTL_OPENCOMPLETE);
+
+ /*
+ * The meaningful CLOSEREQUEST connection state fields are:
+ * reason = reason connection is to be closed
+ */
+
+ ch->flags |= XPC_C_RCLOSEREQUEST;
+
+ if (!(ch->flags & XPC_C_DISCONNECTING)) {
+ reason = args->reason;
+ if (reason <= xpSuccess || reason > xpUnknownReason)
+ reason = xpUnknownReason;
+ else if (reason == xpUnregistering)
+ reason = xpOtherUnregistering;
+
+ XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
+
+ DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
+ goto out;
+ }
+
+ xpc_process_disconnect(ch, &irq_flags);
+ }
+
+ if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
+ "%d, channel=%d\n", ch->partid, ch->number);
+
+ if (ch->flags & XPC_C_DISCONNECTED) {
+ DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
+ goto out;
+ }
+
+ DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
+
+ if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+ if (part->chctl.flags[ch_number] &
+ XPC_CHCTL_CLOSEREQUEST) {
+
+ DBUG_ON(ch->delayed_chctl_flags != 0);
+ spin_lock(&part->chctl_lock);
+ part->chctl.flags[ch_number] |=
+ XPC_CHCTL_CLOSEREPLY;
+ spin_unlock(&part->chctl_lock);
+ }
+ goto out;
+ }
+
+ ch->flags |= XPC_C_RCLOSEREPLY;
+
+ if (ch->flags & XPC_C_CLOSEREPLY) {
+ /* both sides have finished disconnecting */
+ xpc_process_disconnect(ch, &irq_flags);
+ }
+ }
+
+ if (chctl_flags & XPC_CHCTL_OPENREQUEST) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
+ "local_nentries=%d) received from partid=%d, "
+ "channel=%d\n", args->entry_size, args->local_nentries,
+ ch->partid, ch->number);
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING ||
+ (ch->flags & XPC_C_ROPENREQUEST)) {
+ goto out;
+ }
+
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
+ ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
+ goto out;
+ }
+ DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
+ XPC_C_OPENREQUEST)));
+ DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+ XPC_C_OPENREPLY | XPC_C_CONNECTED));
+
+ /*
+ * The meaningful OPENREQUEST connection state fields are:
+ * entry_size = size of channel's messages in bytes
+ * local_nentries = remote partition's local_nentries
+ */
+ if (args->entry_size == 0 || args->local_nentries == 0) {
+ /* assume OPENREQUEST was delayed by mistake */
+ goto out;
+ }
+
+ ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
+ ch->remote_nentries = args->local_nentries;
+
+ if (ch->flags & XPC_C_OPENREQUEST) {
+ if (args->entry_size != ch->entry_size) {
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
+ &irq_flags);
+ goto out;
+ }
+ } else {
+ ch->entry_size = args->entry_size;
+
+ XPC_SET_REASON(ch, 0, 0);
+ ch->flags &= ~XPC_C_DISCONNECTED;
+
+ atomic_inc(&part->nchannels_active);
+ }
+
+ xpc_process_connect(ch, &irq_flags);
+ }
+
+ if (chctl_flags & XPC_CHCTL_OPENREPLY) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
+ "0x%lx, local_nentries=%d, remote_nentries=%d) "
+ "received from partid=%d, channel=%d\n",
+ args->local_msgqueue_pa, args->local_nentries,
+ args->remote_nentries, ch->partid, ch->number);
+
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ goto out;
+
+ if (!(ch->flags & XPC_C_OPENREQUEST)) {
+ XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
+ &irq_flags);
+ goto out;
+ }
+
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
+ DBUG_ON(ch->flags & XPC_C_CONNECTED);
+
+ /*
+ * The meaningful OPENREPLY connection state fields are:
+ * local_msgqueue_pa = physical address of remote
+ * partition's local_msgqueue
+ * local_nentries = remote partition's local_nentries
+ * remote_nentries = remote partition's remote_nentries
+ */
+ DBUG_ON(args->local_msgqueue_pa == 0);
+ DBUG_ON(args->local_nentries == 0);
+ DBUG_ON(args->remote_nentries == 0);
+
+ ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
+ args->local_msgqueue_pa);
+ if (ret != xpSuccess) {
+ XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
+ goto out;
+ }
+ ch->flags |= XPC_C_ROPENREPLY;
+
+ if (args->local_nentries < ch->remote_nentries) {
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
+ "remote_nentries=%d, old remote_nentries=%d, "
+ "partid=%d, channel=%d\n",
+ args->local_nentries, ch->remote_nentries,
+ ch->partid, ch->number);
+
+ ch->remote_nentries = args->local_nentries;
+ }
+ if (args->remote_nentries < ch->local_nentries) {
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
+ "local_nentries=%d, old local_nentries=%d, "
+ "partid=%d, channel=%d\n",
+ args->remote_nentries, ch->local_nentries,
+ ch->partid, ch->number);
+
+ ch->local_nentries = args->remote_nentries;
+ }
+
+ xpc_process_connect(ch, &irq_flags);
+ }
+
+ if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ goto out;
+
+ if (!(ch->flags & XPC_C_OPENREQUEST) ||
+ !(ch->flags & XPC_C_OPENREPLY)) {
+ XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
+ &irq_flags);
+ goto out;
+ }
+
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
+ DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
+
+ ch->flags |= XPC_C_ROPENCOMPLETE;
+
+ xpc_process_connect(ch, &irq_flags);
+ create_kthread = 1;
+ }
+
+out:
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ if (create_kthread)
+ xpc_create_kthreads(ch, 1, 0);
+}
+
+/*
+ * Attempt to establish a channel connection to a remote partition.
+ */
+static enum xp_retval
+xpc_connect_channel(struct xpc_channel *ch)
+{
+ unsigned long irq_flags;
+ struct xpc_registration *registration = &xpc_registrations[ch->number];
+
+ if (mutex_trylock(&registration->mutex) == 0)
+ return xpRetry;
+
+ if (!XPC_CHANNEL_REGISTERED(ch->number)) {
+ mutex_unlock(&registration->mutex);
+ return xpUnregistered;
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ DBUG_ON(ch->flags & XPC_C_CONNECTED);
+ DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ mutex_unlock(&registration->mutex);
+ return ch->reason;
+ }
+
+ /* add info from the channel connect registration to the channel */
+
+ ch->kthreads_assigned_limit = registration->assigned_limit;
+ ch->kthreads_idle_limit = registration->idle_limit;
+ DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+ DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
+ DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
+
+ ch->func = registration->func;
+ DBUG_ON(registration->func == NULL);
+ ch->key = registration->key;
+
+ ch->local_nentries = registration->nentries;
+
+ if (ch->flags & XPC_C_ROPENREQUEST) {
+ if (registration->entry_size != ch->entry_size) {
+ /* the local and remote sides aren't the same */
+
+ /*
+			 * Because XPC_DISCONNECT_CHANNEL() can block, we're
+			 * forced to release the registration mutex before we
+			 * unlock the channel lock. But that's okay here
+			 * because we're done with the part that required the
+			 * registration mutex. XPC_DISCONNECT_CHANNEL()
+			 * requires that the channel lock be locked and will
+			 * unlock and relock the channel lock as needed.
+ */
+ mutex_unlock(&registration->mutex);
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
+ &irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return xpUnequalMsgSizes;
+ }
+ } else {
+ ch->entry_size = registration->entry_size;
+
+ XPC_SET_REASON(ch, 0, 0);
+ ch->flags &= ~XPC_C_DISCONNECTED;
+
+ atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
+ }
+
+ mutex_unlock(&registration->mutex);
+
+ /* initiate the connection */
+
+ ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
+ xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);
+
+ xpc_process_connect(ch, &irq_flags);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ return xpSuccess;
+}
+
+void
+xpc_process_sent_chctl_flags(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ union xpc_channel_ctl_flags chctl;
+ struct xpc_channel *ch;
+ int ch_number;
+ u32 ch_flags;
+
+ chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);
+
+ /*
+ * Initiate channel connections for registered channels.
+ *
+ * For each connected channel that has pending messages activate idle
+ * kthreads and/or create new kthreads as needed.
+ */
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch = &part->channels[ch_number];
+
+ /*
+ * Process any open or close related chctl flags, and then deal
+ * with connecting or disconnecting the channel as required.
+ */
+
+ if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
+ xpc_process_openclose_chctl_flags(part, ch_number,
+ chctl.flags[ch_number]);
+ }
+
+ ch_flags = ch->flags; /* need an atomic snapshot of flags */
+
+ if (ch_flags & XPC_C_DISCONNECTING) {
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ xpc_process_disconnect(ch, &irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ continue;
+ }
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
+ continue;
+
+ if (!(ch_flags & XPC_C_CONNECTED)) {
+ if (!(ch_flags & XPC_C_OPENREQUEST)) {
+ DBUG_ON(ch_flags & XPC_C_SETUP);
+ (void)xpc_connect_channel(ch);
+ }
+ continue;
+ }
+
+ /*
+ * Process any message related chctl flags, this may involve
+ * the activation of kthreads to deliver any pending messages
+ * sent from the other partition.
+ */
+
+ if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
+ xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
+ }
+}
+
+/*
+ * XPC's heartbeat code calls this function to inform XPC that a partition is
+ * going down. XPC responds by tearing down the XPartition Communication
+ * infrastructure used for the just downed partition.
+ *
+ * XPC's heartbeat code will never call this function and xpc_partition_up()
+ * at the same time. Nor will it ever make multiple calls to either function
+ * at the same time.
+ */
+void
+xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
+{
+ unsigned long irq_flags;
+ int ch_number;
+ struct xpc_channel *ch;
+
+ dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
+ XPC_PARTID(part), reason);
+
+ if (!xpc_part_ref(part)) {
+ /* infrastructure for this partition isn't currently set up */
+ return;
+ }
+
+ /* disconnect channels associated with the partition going down */
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch = &part->channels[ch_number];
+
+ xpc_msgqueue_ref(ch);
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ xpc_msgqueue_deref(ch);
+ }
+
+ xpc_wakeup_channel_mgr(part);
+
+ xpc_part_deref(part);
+}
+
+/*
+ * Called by XP at the time of channel connection registration to cause
+ * XPC to establish connections to all currently active partitions.
+ */
+void
+xpc_initiate_connect(int ch_number)
+{
+ short partid;
+ struct xpc_partition *part;
+ struct xpc_channel *ch;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (xpc_part_ref(part)) {
+ ch = &part->channels[ch_number];
+
+ /*
+ * Initiate the establishment of a connection on the
+ * newly registered channel to the remote partition.
+ */
+ xpc_wakeup_channel_mgr(part);
+ xpc_part_deref(part);
+ }
+ }
+}
+
+void
+xpc_connected_callout(struct xpc_channel *ch)
+{
+ /* let the registerer know that a connection has been established */
+
+ if (ch->func != NULL) {
+ dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+
+ ch->func(xpConnected, ch->partid, ch->number,
+ (void *)(u64)ch->local_nentries, ch->key);
+
+ dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+ }
+}
+
+/*
+ * Called by XP at the time of channel connection unregistration to cause
+ * XPC to tear down all current connections for the specified channel.
+ *
+ * Before returning xpc_initiate_disconnect() will wait until all connections
+ * on the specified channel have been closed/torn down. So the caller can be
+ * assured that they will not be receiving any more callouts from XPC to the
+ * function they registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ * ch_number - channel # to unregister.
+ */
+void
+xpc_initiate_disconnect(int ch_number)
+{
+ unsigned long irq_flags;
+ short partid;
+ struct xpc_partition *part;
+ struct xpc_channel *ch;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+
+ /* initiate the channel disconnect for every active partition */
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (xpc_part_ref(part)) {
+ ch = &part->channels[ch_number];
+ xpc_msgqueue_ref(ch);
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (!(ch->flags & XPC_C_DISCONNECTED)) {
+ ch->flags |= XPC_C_WDISCONNECT;
+
+ XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
+ &irq_flags);
+ }
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ xpc_msgqueue_deref(ch);
+ xpc_part_deref(part);
+ }
+ }
+
+ xpc_disconnect_wait(ch_number);
+}
+
+/*
+ * Disconnect a channel and reflect it back to all who may be waiting.
+ *
+ * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
+ * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
+ * xpc_disconnect_wait().
+ *
+ * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
+ */
+void
+xpc_disconnect_channel(const int line, struct xpc_channel *ch,
+ enum xp_retval reason, unsigned long *irq_flags)
+{
+ u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
+
+ DBUG_ON(!spin_is_locked(&ch->lock));
+
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ return;
+
+ DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
+
+ dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
+ reason, line, ch->partid, ch->number);
+
+ XPC_SET_REASON(ch, reason, line);
+
+ ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
+ /* some of these may not have been set */
+ ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
+ XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+ XPC_C_CONNECTING | XPC_C_CONNECTED);
+
+ xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);
+
+ if (channel_was_connected)
+ ch->flags |= XPC_C_WASCONNECTED;
+
+ spin_unlock_irqrestore(&ch->lock, *irq_flags);
+
+ /* wake all idle kthreads so they can exit */
+ if (atomic_read(&ch->kthreads_idle) > 0) {
+ wake_up_all(&ch->idle_wq);
+
+ } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+ !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+ /* start a kthread that will do the xpDisconnecting callout */
+ xpc_create_kthreads(ch, 1, 1);
+ }
+
+ /* wake those waiting to allocate an entry from the local msg queue */
+ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
+ wake_up(&ch->msg_allocate_wq);
+
+ spin_lock_irqsave(&ch->lock, *irq_flags);
+}
+
+void
+xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
+{
+ /*
+ * Let the channel's registerer know that the channel is being
+ * disconnected. We don't want to do this if the registerer was never
+ * informed of a connection being made.
+ */
+
+ if (ch->func != NULL) {
+ dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
+ "channel=%d\n", reason, ch->partid, ch->number);
+
+ ch->func(reason, ch->partid, ch->number, NULL, ch->key);
+
+ dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
+ "channel=%d\n", reason, ch->partid, ch->number);
+ }
+}
+
+/*
+ * Wait for a message entry to become available for the specified channel,
+ * but don't wait any longer than 1 jiffy.
+ */
+enum xp_retval
+xpc_allocate_msg_wait(struct xpc_channel *ch)
+{
+ enum xp_retval ret;
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ DBUG_ON(ch->reason == xpInterrupted);
+ return ch->reason;
+ }
+
+ atomic_inc(&ch->n_on_msg_allocate_wq);
+ ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
+ atomic_dec(&ch->n_on_msg_allocate_wq);
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ ret = ch->reason;
+ DBUG_ON(ch->reason == xpInterrupted);
+ } else if (ret == 0) {
+ ret = xpTimeout;
+ } else {
+ ret = xpInterrupted;
+ }
+
+ return ret;
+}
+
+/*
+ * Send a message that contains the user's payload on the specified channel
+ * connected to the specified partition.
+ *
+ * NOTE that this routine can sleep waiting for a message entry to become
+ * available. To not sleep, pass in the XPC_NOWAIT flag.
+ *
+ * Once sent, this routine will not wait for the message to be received, nor
+ * will notification be given when it does happen.
+ *
+ * Arguments:
+ *
+ * partid - ID of partition to which the channel is connected.
+ * ch_number - channel # to send message on.
+ * flags - see xp.h for valid flags.
+ * payload - pointer to the payload which is to be sent.
+ * payload_size - size of the payload in bytes.
+ */
+enum xp_retval
+xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
+ u16 payload_size)
+{
+ struct xpc_partition *part = &xpc_partitions[partid];
+ enum xp_retval ret = xpUnknownReason;
+
+ dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
+ partid, ch_number);
+
+ DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
+ DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+ DBUG_ON(payload == NULL);
+
+ if (xpc_part_ref(part)) {
+ ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
+ flags, payload, payload_size, 0, NULL, NULL);
+ xpc_part_deref(part);
+ }
+
+ return ret;
+}
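+
+/*
+ * A hedged usage sketch (partid, the channel # and struct my_msg are
+ * hypothetical; XPC_NOWAIT is among the flags referred to in xp.h). A
+ * client that registered the channel via xpc_connect() might push a
+ * payload without sleeping:
+ *
+ *	struct my_msg msg = { ... };
+ *	enum xp_retval ret;
+ *
+ *	ret = xpc_initiate_send(partid, 5, XPC_NOWAIT, &msg, sizeof(msg));
+ *	if (ret != xpSuccess)
+ *		...handle the error...
+ */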
+
+/*
+ * Send a message that contains the user's payload on the specified channel
+ * connected to the specified partition.
+ *
+ * NOTE that this routine can sleep waiting for a message entry to become
+ * available. To not sleep, pass in the XPC_NOWAIT flag.
+ *
+ * This routine will not wait for the message to be sent or received.
+ *
+ * Once the remote end of the channel has received the message, the function
+ * passed as an argument to xpc_initiate_send_notify() will be called. This
+ * allows the sender to free up or re-use any buffers referenced by the
+ * message, but does NOT mean the message has been processed at the remote
+ * end by a receiver.
+ *
+ * If this routine returns an error, the caller's function will NOT be called.
+ *
+ * Arguments:
+ *
+ * partid - ID of partition to which the channel is connected.
+ * ch_number - channel # to send message on.
+ * flags - see xp.h for valid flags.
+ * payload - pointer to the payload which is to be sent.
+ * payload_size - size of the payload in bytes.
+ * func - function to call with asynchronous notification of message
+ * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
+ * key - user-defined key to be passed to the function when it's called.
+ */
+enum xp_retval
+xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
+ u16 payload_size, xpc_notify_func func, void *key)
+{
+ struct xpc_partition *part = &xpc_partitions[partid];
+ enum xp_retval ret = xpUnknownReason;
+
+ dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
+ partid, ch_number);
+
+ DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
+ DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+ DBUG_ON(payload == NULL);
+ DBUG_ON(func == NULL);
+
+ if (xpc_part_ref(part)) {
+ ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
+ flags, payload, payload_size, XPC_N_CALL, func, key);
+ xpc_part_deref(part);
+ }
+ return ret;
+}
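+
+/*
+ * A hedged sketch of the notify variant (my_notify and buf are
+ * hypothetical, and the callout signature is assumed to match
+ * xpc_notify_func). The callout must not block; a typical use is to free
+ * the buffer once the remote side has received the message:
+ *
+ *	static void my_notify(enum xp_retval reason, short partid,
+ *			      int ch_number, void *key)
+ *	{
+ *		kfree(key);
+ *	}
+ *
+ *	ret = xpc_initiate_send_notify(partid, ch_number, XPC_NOWAIT, buf,
+ *				       size, my_notify, buf);
+ */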
+
+/*
+ * Deliver a message's payload to its intended recipient.
+ */
+void
+xpc_deliver_payload(struct xpc_channel *ch)
+{
+ void *payload;
+
+ payload = xpc_arch_ops.get_deliverable_payload(ch);
+ if (payload != NULL) {
+
+ /*
+ * This ref is taken to protect the payload itself from being
+ * freed before the user is finished with it, which the user
+ * indicates by calling xpc_initiate_received().
+ */
+ xpc_msgqueue_ref(ch);
+
+ atomic_inc(&ch->kthreads_active);
+
+ if (ch->func != NULL) {
+ dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
+ "partid=%d channel=%d\n", payload, ch->partid,
+ ch->number);
+
+ /* deliver the message to its intended recipient */
+ ch->func(xpMsgReceived, ch->partid, ch->number, payload,
+ ch->key);
+
+ dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
+ "partid=%d channel=%d\n", payload, ch->partid,
+ ch->number);
+ }
+
+ atomic_dec(&ch->kthreads_active);
+ }
+}
+
+/*
+ * Acknowledge receipt of a delivered message's payload.
+ *
+ * This function, although called by users, does not call xpc_part_ref() to
+ * ensure that the partition infrastructure is in place. It relies on the
+ * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
+ *
+ * Arguments:
+ *
+ * partid - ID of partition to which the channel is connected.
+ * ch_number - channel # message received on.
+ * payload - pointer to the payload area allocated via
+ * xpc_initiate_send() or xpc_initiate_send_notify().
+ */
+void
+xpc_initiate_received(short partid, int ch_number, void *payload)
+{
+ struct xpc_partition *part = &xpc_partitions[partid];
+ struct xpc_channel *ch;
+
+ DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
+ DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+
+ ch = &part->channels[ch_number];
+ xpc_arch_ops.received_payload(ch, payload);
+
+ /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
+ xpc_msgqueue_deref(ch);
+}
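+
+/*
+ * A hedged sketch of the receive side (my_chan_func is hypothetical, with
+ * the signature assumed for functions registered via xpc_connect()). The
+ * registered function consumes each delivered payload and then acknowledges
+ * it so the message entry can be recycled:
+ *
+ *	static void my_chan_func(enum xp_retval reason, short partid,
+ *				 int ch_number, void *payload, void *key)
+ *	{
+ *		if (reason == xpMsgReceived) {
+ *			...consume payload...
+ *			xpc_initiate_received(partid, ch_number, payload);
+ *		}
+ *	}
+ */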
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
new file mode 100644
index 00000000..8d082b46
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -0,0 +1,1344 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) support - standard version.
+ *
+ * XPC provides a message passing capability that crosses partition
+ * boundaries. This module is made up of two parts:
+ *
+ * partition This part detects the presence/absence of other
+ * partitions. It provides a heartbeat and monitors
+ * the heartbeats of other partitions.
+ *
+ * channel This part manages the channels and sends/receives
+ * messages across them to/from other partitions.
+ *
+ * There are a couple of additional functions residing in XP, which
+ * provide an interface to XPC for its users.
+ *
+ *
+ * Caveats:
+ *
+ * . Currently on sn2, we have no way to determine which nasid an IRQ
+ * came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
+ * followed by an IPI. The amo indicates where data is to be pulled
+ * from, so after the IPI arrives, the remote partition checks the amo
+ * word. The IPI can actually arrive before the amo however, so other
+ * code must periodically check for this case. Also, remote amo
+ * operations do not reliably time out. Thus we do a remote PIO read
+ * solely to know whether the remote partition is down and whether we
+ * should stop sending IPIs to it. This remote PIO read operation is
+ * set up in a special nofault region so SAL knows to ignore (and
+ * cleanup) any errors due to the remote amo write, PIO read, and/or
+ * PIO write operations.
+ *
+ * If/when new hardware solves this IPI problem, we should abandon
+ * the current approach.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/kdebug.h>
+#include <linux/kthread.h>
+#include "xpc.h"
+
+/* define two XPC debug device structures to be used with dev_dbg() et al */
+
+struct device_driver xpc_dbg_name = {
+ .name = "xpc"
+};
+
+struct device xpc_part_dbg_subname = {
+ .init_name = "", /* set to "part" at xpc_init() time */
+ .driver = &xpc_dbg_name
+};
+
+struct device xpc_chan_dbg_subname = {
+ .init_name = "", /* set to "chan" at xpc_init() time */
+ .driver = &xpc_dbg_name
+};
+
+struct device *xpc_part = &xpc_part_dbg_subname;
+struct device *xpc_chan = &xpc_chan_dbg_subname;
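+
+/*
+ * dev_dbg() et al should thus prefix their output with "xpc part:" and
+ * "xpc chan:" respectively (assuming the usual dev_printk() formatting).
+ */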
+
+static int xpc_kdebug_ignore;
+
+/* systune related variables for /proc/sys directories */
+
+static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
+static int xpc_hb_min_interval = 1;
+static int xpc_hb_max_interval = 10;
+
+static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
+static int xpc_hb_check_min_interval = 10;
+static int xpc_hb_check_max_interval = 120;
+
+int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
+static int xpc_disengage_min_timelimit; /* = 0 */
+static int xpc_disengage_max_timelimit = 120;
+
+static ctl_table xpc_sys_xpc_hb_dir[] = {
+ {
+ .procname = "hb_interval",
+ .data = &xpc_hb_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &xpc_hb_min_interval,
+ .extra2 = &xpc_hb_max_interval},
+ {
+ .procname = "hb_check_interval",
+ .data = &xpc_hb_check_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &xpc_hb_check_min_interval,
+ .extra2 = &xpc_hb_check_max_interval},
+ {}
+};
+static ctl_table xpc_sys_xpc_dir[] = {
+ {
+ .procname = "hb",
+ .mode = 0555,
+ .child = xpc_sys_xpc_hb_dir},
+ {
+ .procname = "disengage_timelimit",
+ .data = &xpc_disengage_timelimit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &xpc_disengage_min_timelimit,
+ .extra2 = &xpc_disengage_max_timelimit},
+ {}
+};
+static ctl_table xpc_sys_dir[] = {
+ {
+ .procname = "xpc",
+ .mode = 0555,
+ .child = xpc_sys_xpc_dir},
+ {}
+};
+static struct ctl_table_header *xpc_sysctl;
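+
+/*
+ * Once registered at xpc_init() time, the tables above surface as (assuming
+ * procfs is mounted in the usual place):
+ *
+ *	/proc/sys/xpc/hb/hb_interval
+ *	/proc/sys/xpc/hb/hb_check_interval
+ *	/proc/sys/xpc/disengage_timelimit
+ */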
+
+/* non-zero if any remote partition disengage was timed out */
+int xpc_disengage_timedout;
+
+/* #of activate IRQs received and not yet processed */
+int xpc_activate_IRQ_rcvd;
+DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);
+
+/* IRQ handler notifies this wait queue on receipt of an IRQ */
+DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
+
+static unsigned long xpc_hb_check_timeout;
+static struct timer_list xpc_hb_timer;
+
+/* notification that the xpc_hb_checker thread has exited */
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
+
+/* notification that the xpc_discovery thread has exited */
+static DECLARE_COMPLETION(xpc_discovery_exited);
+
+static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
+
+static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_reboot_notifier = {
+ .notifier_call = xpc_system_reboot,
+};
+
+static int xpc_system_die(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_die_notifier = {
+ .notifier_call = xpc_system_die,
+};
+
+struct xpc_arch_operations xpc_arch_ops;
+
+/*
+ * Timer function to enforce the timelimit on the partition disengage.
+ */
+static void
+xpc_timeout_partition_disengage(unsigned long data)
+{
+ struct xpc_partition *part = (struct xpc_partition *)data;
+
+ DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
+
+ (void)xpc_partition_disengaged(part);
+
+ DBUG_ON(part->disengage_timeout != 0);
+ DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
+}
+
+/*
+ * Timer to produce the heartbeat. The timer structure's function is
+ * already set when this is initially called. A tunable is used to
+ * specify when the next timeout should occur.
+ */
+static void
+xpc_hb_beater(unsigned long dummy)
+{
+ xpc_arch_ops.increment_heartbeat();
+
+ if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+
+ xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
+ add_timer(&xpc_hb_timer);
+}
+
+static void
+xpc_start_hb_beater(void)
+{
+ xpc_arch_ops.heartbeat_init();
+ init_timer(&xpc_hb_timer);
+ xpc_hb_timer.function = xpc_hb_beater;
+ xpc_hb_beater(0);
+}
+
+static void
+xpc_stop_hb_beater(void)
+{
+ del_timer_sync(&xpc_hb_timer);
+ xpc_arch_ops.heartbeat_exit();
+}
+
+/*
+ * At periodic intervals, scan through all active partitions and ensure
+ * their heartbeat is still active. If not, the partition is deactivated.
+ */
+static void
+xpc_check_remote_hb(void)
+{
+ struct xpc_partition *part;
+ short partid;
+ enum xp_retval ret;
+
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+
+ if (xpc_exiting)
+ break;
+
+ if (partid == xp_partition_id)
+ continue;
+
+ part = &xpc_partitions[partid];
+
+ if (part->act_state == XPC_P_AS_INACTIVE ||
+ part->act_state == XPC_P_AS_DEACTIVATING) {
+ continue;
+ }
+
+ ret = xpc_arch_ops.get_remote_heartbeat(part);
+ if (ret != xpSuccess)
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ }
+}
+
+/*
+ * This thread is responsible for nearly all of the partition
+ * activation/deactivation.
+ */
+static int
+xpc_hb_checker(void *ignore)
+{
+ int force_IRQ = 0;
+
+ /* this thread was marked active by xpc_hb_init() */
+
+ set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
+
+ /* set our heartbeating to other partitions into motion */
+ xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
+ xpc_start_hb_beater();
+
+ while (!xpc_exiting) {
+
+ dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
+ "been received\n",
+ (int)(xpc_hb_check_timeout - jiffies),
+ xpc_activate_IRQ_rcvd);
+
+ /* checking of remote heartbeats is skewed by IRQ handling */
+ if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
+ xpc_hb_check_timeout = jiffies +
+ (xpc_hb_check_interval * HZ);
+
+ dev_dbg(xpc_part, "checking remote heartbeats\n");
+ xpc_check_remote_hb();
+
+ /*
+ * On sn2 we need to periodically recheck to ensure no
+ * IRQ/amo pairs have been missed.
+ */
+ if (is_shub())
+ force_IRQ = 1;
+ }
+
+ /* check for outstanding IRQs */
+ if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
+ force_IRQ = 0;
+ dev_dbg(xpc_part, "processing activate IRQs "
+ "received\n");
+ xpc_arch_ops.process_activate_IRQ_rcvd();
+ }
+
+ /* wait for IRQ or timeout */
+ (void)wait_event_interruptible(xpc_activate_IRQ_wq,
+ (time_is_before_eq_jiffies(
+ xpc_hb_check_timeout) ||
+ xpc_activate_IRQ_rcvd > 0 ||
+ xpc_exiting));
+ }
+
+ xpc_stop_hb_beater();
+
+ dev_dbg(xpc_part, "heartbeat checker is exiting\n");
+
+ /* mark this thread as having exited */
+ complete(&xpc_hb_checker_exited);
+ return 0;
+}
+
+/*
+ * This thread will attempt to discover other partitions to activate
+ * based on info provided by SAL. This new thread is short-lived and
+ * will exit once discovery is complete.
+ */
+static int
+xpc_initiate_discovery(void *ignore)
+{
+ xpc_discovery();
+
+ dev_dbg(xpc_part, "discovery thread is exiting\n");
+
+ /* mark this thread as having exited */
+ complete(&xpc_discovery_exited);
+ return 0;
+}
+
+/*
+ * The first kthread assigned to a newly activated partition is the one
+ * created by XPC HB to call xpc_activating(). XPC hangs on to that kthread
+ * until the partition is brought down, at which time that kthread returns
+ * to XPC HB. (The return of that kthread will signify to XPC HB
+ * that XPC has dismantled all communication infrastructure for the associated
+ * partition.) This kthread becomes the channel manager for that partition.
+ *
+ * Each active partition has a channel manager, who, besides connecting and
+ * disconnecting channels, will ensure that each of the partition's connected
+ * channels has the required number of assigned kthreads to get the work done.
+ */
+static void
+xpc_channel_mgr(struct xpc_partition *part)
+{
+ while (part->act_state != XPC_P_AS_DEACTIVATING ||
+ atomic_read(&part->nchannels_active) > 0 ||
+ !xpc_partition_disengaged(part)) {
+
+ xpc_process_sent_chctl_flags(part);
+
+ /*
+ * Wait until we've been requested to activate kthreads or
+ * all of the channel's message queues have been torn down or
+ * a signal is pending.
+ *
+		 * channel_mgr_requests is set to 1 after being awakened.
+		 * This is done to prevent the channel mgr from making one
+		 * pass through the loop for each request, since it will be
+		 * servicing all the requests in one pass. The reason it's
+		 * set to 1 instead of 0 is so that other kthreads will know
+		 * that the channel mgr is running and won't bother trying to
+		 * wake it up.
+ */
+ atomic_dec(&part->channel_mgr_requests);
+ (void)wait_event_interruptible(part->channel_mgr_wq,
+ (atomic_read(&part->channel_mgr_requests) > 0 ||
+ part->chctl.all_flags != 0 ||
+ (part->act_state == XPC_P_AS_DEACTIVATING &&
+ atomic_read(&part->nchannels_active) == 0 &&
+ xpc_partition_disengaged(part))));
+ atomic_set(&part->channel_mgr_requests, 1);
+ }
+}
+
+/*
+ * Guarantee that the kzalloc'd memory is cacheline aligned.
+ */
+void *
+xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+	/* see if kzalloc will give us cacheline aligned memory by default */
+ *base = kzalloc(size, flags);
+ if (*base == NULL)
+ return NULL;
+
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
+ return *base;
+
+ kfree(*base);
+
+ /* nope, we'll have to do it ourselves */
+ *base = kzalloc(size + L1_CACHE_BYTES, flags);
+ if (*base == NULL)
+ return NULL;
+
+ return (void *)L1_CACHE_ALIGN((u64)*base);
+}
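+
+/*
+ * A minimal usage sketch (the caller and struct foo are hypothetical): the
+ * returned pointer is the cacheline-aligned region, while *base is what
+ * must eventually be handed to kfree(), as xpc_teardown_ch_structures()
+ * does below:
+ *
+ *	void *base;
+ *	struct foo *p;
+ *
+ *	p = xpc_kzalloc_cacheline_aligned(sizeof(*p), GFP_KERNEL, &base);
+ *	if (p != NULL) {
+ *		...use p...
+ *		kfree(base);
+ *	}
+ */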
+
+/*
+ * Set up the channel structures needed to support XPartition Communication
+ * between the specified remote partition and the local one.
+ */
+static enum xp_retval
+xpc_setup_ch_structures(struct xpc_partition *part)
+{
+ enum xp_retval ret;
+ int ch_number;
+ struct xpc_channel *ch;
+ short partid = XPC_PARTID(part);
+
+ /*
+ * Allocate all of the channel structures as a contiguous chunk of
+ * memory.
+ */
+ DBUG_ON(part->channels != NULL);
+ part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
+ GFP_KERNEL);
+ if (part->channels == NULL) {
+ dev_err(xpc_chan, "can't get memory for channels\n");
+ return xpNoMemory;
+ }
+
+ /* allocate the remote open and close args */
+
+ part->remote_openclose_args =
+ xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
+ GFP_KERNEL, &part->
+ remote_openclose_args_base);
+ if (part->remote_openclose_args == NULL) {
+ dev_err(xpc_chan, "can't get memory for remote connect args\n");
+ ret = xpNoMemory;
+ goto out_1;
+ }
+
+ part->chctl.all_flags = 0;
+ spin_lock_init(&part->chctl_lock);
+
+ atomic_set(&part->channel_mgr_requests, 1);
+ init_waitqueue_head(&part->channel_mgr_wq);
+
+ part->nchannels = XPC_MAX_NCHANNELS;
+
+ atomic_set(&part->nchannels_active, 0);
+ atomic_set(&part->nchannels_engaged, 0);
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch = &part->channels[ch_number];
+
+ ch->partid = partid;
+ ch->number = ch_number;
+ ch->flags = XPC_C_DISCONNECTED;
+
+ atomic_set(&ch->kthreads_assigned, 0);
+ atomic_set(&ch->kthreads_idle, 0);
+ atomic_set(&ch->kthreads_active, 0);
+
+ atomic_set(&ch->references, 0);
+ atomic_set(&ch->n_to_notify, 0);
+
+ spin_lock_init(&ch->lock);
+ init_completion(&ch->wdisconnect_wait);
+
+ atomic_set(&ch->n_on_msg_allocate_wq, 0);
+ init_waitqueue_head(&ch->msg_allocate_wq);
+ init_waitqueue_head(&ch->idle_wq);
+ }
+
+ ret = xpc_arch_ops.setup_ch_structures(part);
+ if (ret != xpSuccess)
+ goto out_2;
+
+ /*
+ * With the setting of the partition setup_state to XPC_P_SS_SETUP,
+ * we're declaring that this partition is ready to go.
+ */
+ part->setup_state = XPC_P_SS_SETUP;
+
+ return xpSuccess;
+
+ /* setup of ch structures failed */
+out_2:
+ kfree(part->remote_openclose_args_base);
+ part->remote_openclose_args = NULL;
+out_1:
+ kfree(part->channels);
+ part->channels = NULL;
+ return ret;
+}
+
+/*
+ * Tear down the channel structures needed to support XPartition
+ * Communication between the specified remote partition and the local one.
+ */
+static void
+xpc_teardown_ch_structures(struct xpc_partition *part)
+{
+ DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
+ DBUG_ON(atomic_read(&part->nchannels_active) != 0);
+
+ /*
+ * Make this partition inaccessible to local processes by marking it
+	 * as no longer set up. Then wait before proceeding with the teardown
+ * until all existing references cease.
+ */
+ DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
+ part->setup_state = XPC_P_SS_WTEARDOWN;
+
+ wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
+
+ /* now we can begin tearing down the infrastructure */
+
+ xpc_arch_ops.teardown_ch_structures(part);
+
+ kfree(part->remote_openclose_args_base);
+ part->remote_openclose_args = NULL;
+ kfree(part->channels);
+ part->channels = NULL;
+
+ part->setup_state = XPC_P_SS_TORNDOWN;
+}
+
+/*
+ * When XPC HB determines that a partition has come up, it will create a new
+ * kthread and that kthread will call this function to attempt to set up the
+ * basic infrastructure used for Cross Partition Communication with the newly
+ * upped partition.
+ *
+ * The kthread that was created by XPC HB and which set up the XPC
+ * infrastructure will remain assigned to the partition, becoming the channel
+ * manager for that partition, until the partition is deactivating, at which
+ * time the kthread will tear down the XPC infrastructure and then exit.
+ */
+static int
+xpc_activating(void *__partid)
+{
+ short partid = (u64)__partid;
+ struct xpc_partition *part = &xpc_partitions[partid];
+ unsigned long irq_flags;
+
+ DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ part->act_state = XPC_P_AS_INACTIVE;
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ part->remote_rp_pa = 0;
+ return 0;
+ }
+
+ /* indicate the thread is activating */
+ DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
+ part->act_state = XPC_P_AS_ACTIVATING;
+
+ XPC_SET_REASON(part, 0, 0);
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ dev_dbg(xpc_part, "activating partition %d\n", partid);
+
+ xpc_arch_ops.allow_hb(partid);
+
+ if (xpc_setup_ch_structures(part) == xpSuccess) {
+ (void)xpc_part_ref(part); /* this will always succeed */
+
+ if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
+ xpc_mark_partition_active(part);
+ xpc_channel_mgr(part);
+ /* won't return until partition is deactivating */
+ }
+
+ xpc_part_deref(part);
+ xpc_teardown_ch_structures(part);
+ }
+
+ xpc_arch_ops.disallow_hb(partid);
+ xpc_mark_partition_inactive(part);
+
+ if (part->reason == xpReactivating) {
+ /* interrupting ourselves results in activating partition */
+ xpc_arch_ops.request_partition_reactivation(part);
+ }
+
+ return 0;
+}
+
+void
+xpc_activate_partition(struct xpc_partition *part)
+{
+ short partid = XPC_PARTID(part);
+ unsigned long irq_flags;
+ struct task_struct *kthread;
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+
+ DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
+
+ part->act_state = XPC_P_AS_ACTIVATION_REQ;
+ XPC_SET_REASON(part, xpCloneKThread, __LINE__);
+
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
+ partid);
+ if (IS_ERR(kthread)) {
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ part->act_state = XPC_P_AS_INACTIVE;
+ XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ }
+}
+
+void
+xpc_activate_kthreads(struct xpc_channel *ch, int needed)
+{
+ int idle = atomic_read(&ch->kthreads_idle);
+ int assigned = atomic_read(&ch->kthreads_assigned);
+ int wakeup;
+
+ DBUG_ON(needed <= 0);
+
+ if (idle > 0) {
+ wakeup = (needed > idle) ? idle : needed;
+ needed -= wakeup;
+
+ dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
+ "channel=%d\n", wakeup, ch->partid, ch->number);
+
+ /* only wakeup the requested number of kthreads */
+ wake_up_nr(&ch->idle_wq, wakeup);
+ }
+
+ if (needed <= 0)
+ return;
+
+ if (needed + assigned > ch->kthreads_assigned_limit) {
+ needed = ch->kthreads_assigned_limit - assigned;
+ if (needed <= 0)
+ return;
+ }
+
+ dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
+ needed, ch->partid, ch->number);
+
+ xpc_create_kthreads(ch, needed, 0);
+}
+
+/*
+ * This function is where XPC's kthreads wait for messages to deliver.
+ */
+static void
+xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
+{
+ int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+ xpc_arch_ops.n_of_deliverable_payloads;
+
+ do {
+ /* deliver messages to their intended recipients */
+
+ while (n_of_deliverable_payloads(ch) > 0 &&
+ !(ch->flags & XPC_C_DISCONNECTING)) {
+ xpc_deliver_payload(ch);
+ }
+
+ if (atomic_inc_return(&ch->kthreads_idle) >
+ ch->kthreads_idle_limit) {
+ /* too many idle kthreads on this channel */
+ atomic_dec(&ch->kthreads_idle);
+ break;
+ }
+
+ dev_dbg(xpc_chan, "idle kthread calling "
+ "wait_event_interruptible_exclusive()\n");
+
+ (void)wait_event_interruptible_exclusive(ch->idle_wq,
+ (n_of_deliverable_payloads(ch) > 0 ||
+ (ch->flags & XPC_C_DISCONNECTING)));
+
+ atomic_dec(&ch->kthreads_idle);
+
+ } while (!(ch->flags & XPC_C_DISCONNECTING));
+}
+
+static int
+xpc_kthread_start(void *args)
+{
+ short partid = XPC_UNPACK_ARG1(args);
+ u16 ch_number = XPC_UNPACK_ARG2(args);
+ struct xpc_partition *part = &xpc_partitions[partid];
+ struct xpc_channel *ch;
+ int n_needed;
+ unsigned long irq_flags;
+ int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+ xpc_arch_ops.n_of_deliverable_payloads;
+
+ dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
+ partid, ch_number);
+
+ ch = &part->channels[ch_number];
+
+ if (!(ch->flags & XPC_C_DISCONNECTING)) {
+
+ /* let registerer know that connection has been established */
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
+ ch->flags |= XPC_C_CONNECTEDCALLOUT;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ xpc_connected_callout(ch);
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ /*
+			 * It is possible that while the callout was being
+			 * made the remote partition sent some messages. If
+			 * that is the case, we may need to activate
+			 * additional kthreads to help deliver them. We only
+			 * need one less than the total #of messages to
+			 * deliver.
+ */
+ n_needed = n_of_deliverable_payloads(ch) - 1;
+ if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
+ xpc_activate_kthreads(ch, n_needed);
+
+ } else {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ }
+
+ xpc_kthread_waitmsgs(part, ch);
+ }
+
+ /* let registerer know that connection is disconnecting */
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+ !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+ ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ xpc_disconnect_callout(ch, xpDisconnecting);
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
+ atomic_dec_return(&part->nchannels_engaged) == 0) {
+ xpc_arch_ops.indicate_partition_disengaged(part);
+ }
+
+ xpc_msgqueue_deref(ch);
+
+ dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
+ partid, ch_number);
+
+ xpc_part_deref(part);
+ return 0;
+}
+
+/*
+ * For each partition that XPC has established communications with, there is
+ * a minimum of one kernel thread assigned to perform any operation that
+ * may potentially sleep or block (basically the callouts to the asynchronous
+ * functions registered via xpc_connect()).
+ *
+ * Additional kthreads are created and destroyed by XPC as the workload
+ * demands.
+ *
+ * A kthread is assigned to one of the active channels that exists for a given
+ * partition.
+ */
+void
+xpc_create_kthreads(struct xpc_channel *ch, int needed,
+ int ignore_disconnecting)
+{
+ unsigned long irq_flags;
+ u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
+ struct xpc_partition *part = &xpc_partitions[ch->partid];
+ struct task_struct *kthread;
+ void (*indicate_partition_disengaged) (struct xpc_partition *) =
+ xpc_arch_ops.indicate_partition_disengaged;
+
+ while (needed-- > 0) {
+
+ /*
+ * The following is done on behalf of the newly created
+ * kthread. That kthread is responsible for doing the
+ * counterpart to the following before it exits.
+ */
+ if (ignore_disconnecting) {
+ if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
+ /* kthreads assigned had gone to zero */
+ BUG_ON(!(ch->flags &
+ XPC_C_DISCONNECTINGCALLOUT_MADE));
+ break;
+ }
+
+ } else if (ch->flags & XPC_C_DISCONNECTING) {
+ break;
+
+ } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
+ atomic_inc_return(&part->nchannels_engaged) == 1) {
+ xpc_arch_ops.indicate_partition_engaged(part);
+ }
+ (void)xpc_part_ref(part);
+ xpc_msgqueue_ref(ch);
+
+ kthread = kthread_run(xpc_kthread_start, (void *)args,
+ "xpc%02dc%d", ch->partid, ch->number);
+ if (IS_ERR(kthread)) {
+ /* the fork failed */
+
+ /*
+ * NOTE: if (ignore_disconnecting &&
+ * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
+ * then we'll deadlock if all other kthreads assigned
+ * to this channel are blocked in the channel's
+ * registerer, because the only thing that will unblock
+ * them is the xpDisconnecting callout that this
+ * failed kthread_run() would have made.
+ */
+
+ if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
+ atomic_dec_return(&part->nchannels_engaged) == 0) {
+ indicate_partition_disengaged(part);
+ }
+ xpc_msgqueue_deref(ch);
+ xpc_part_deref(part);
+
+ if (atomic_read(&ch->kthreads_assigned) <
+ ch->kthreads_idle_limit) {
+ /*
+ * Flag this as an error only if we have an
+ * insufficient #of kthreads for the channel
+ * to function.
+ */
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
+ &irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ }
+ break;
+ }
+ }
+}
+
+void
+xpc_disconnect_wait(int ch_number)
+{
+ unsigned long irq_flags;
+ short partid;
+ struct xpc_partition *part;
+ struct xpc_channel *ch;
+ int wakeup_channel_mgr;
+
+ /* now wait for all callouts to the caller's function to cease */
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (!xpc_part_ref(part))
+ continue;
+
+ ch = &part->channels[ch_number];
+
+ if (!(ch->flags & XPC_C_WDISCONNECT)) {
+ xpc_part_deref(part);
+ continue;
+ }
+
+ wait_for_completion(&ch->wdisconnect_wait);
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
+ wakeup_channel_mgr = 0;
+
+ if (ch->delayed_chctl_flags) {
+ if (part->act_state != XPC_P_AS_DEACTIVATING) {
+ spin_lock(&part->chctl_lock);
+ part->chctl.flags[ch->number] |=
+ ch->delayed_chctl_flags;
+ spin_unlock(&part->chctl_lock);
+ wakeup_channel_mgr = 1;
+ }
+ ch->delayed_chctl_flags = 0;
+ }
+
+ ch->flags &= ~XPC_C_WDISCONNECT;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ if (wakeup_channel_mgr)
+ xpc_wakeup_channel_mgr(part);
+
+ xpc_part_deref(part);
+ }
+}
+
+static int
+xpc_setup_partitions(void)
+{
+ short partid;
+ struct xpc_partition *part;
+
+ xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
+ xp_max_npartitions, GFP_KERNEL);
+ if (xpc_partitions == NULL) {
+ dev_err(xpc_part, "can't get memory for partition structure\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The first few fields of each entry of xpc_partitions[] need to
+ * be initialized now so that calls to xpc_connect() and
+ * xpc_disconnect() can be made prior to the activation of any remote
+ * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
+ * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
+ * PARTITION HAS BEEN ACTIVATED.
+ */
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
+
+ part->activate_IRQ_rcvd = 0;
+ spin_lock_init(&part->act_lock);
+ part->act_state = XPC_P_AS_INACTIVE;
+ XPC_SET_REASON(part, 0, 0);
+
+ init_timer(&part->disengage_timer);
+ part->disengage_timer.function =
+ xpc_timeout_partition_disengage;
+ part->disengage_timer.data = (unsigned long)part;
+
+ part->setup_state = XPC_P_SS_UNSET;
+ init_waitqueue_head(&part->teardown_wq);
+ atomic_set(&part->references, 0);
+ }
+
+ return xpc_arch_ops.setup_partitions();
+}
+
+static void
+xpc_teardown_partitions(void)
+{
+ xpc_arch_ops.teardown_partitions();
+ kfree(xpc_partitions);
+}
+
+static void
+xpc_do_exit(enum xp_retval reason)
+{
+ short partid;
+ int active_part_count, printed_waiting_msg = 0;
+ struct xpc_partition *part;
+ unsigned long printmsg_time, disengage_timeout = 0;
+
+ /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
+ DBUG_ON(xpc_exiting == 1);
+
+ /*
+ * Let the heartbeat checker thread and the discovery thread
+ * (if one is running) know that they should exit. Also wake up
+ * the heartbeat checker thread in case it's sleeping.
+ */
+ xpc_exiting = 1;
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+
+ /* wait for the discovery thread to exit */
+ wait_for_completion(&xpc_discovery_exited);
+
+ /* wait for the heartbeat checker thread to exit */
+ wait_for_completion(&xpc_hb_checker_exited);
+
+	/* sleep for about 1/3 of a second */
+ (void)msleep_interruptible(300);
+
+ /* wait for all partitions to become inactive */
+
+ printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
+ xpc_disengage_timedout = 0;
+
+ do {
+ active_part_count = 0;
+
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (xpc_partition_disengaged(part) &&
+ part->act_state == XPC_P_AS_INACTIVE) {
+ continue;
+ }
+
+ active_part_count++;
+
+ XPC_DEACTIVATE_PARTITION(part, reason);
+
+ if (part->disengage_timeout > disengage_timeout)
+ disengage_timeout = part->disengage_timeout;
+ }
+
+ if (xpc_arch_ops.any_partition_engaged()) {
+ if (time_is_before_jiffies(printmsg_time)) {
+ dev_info(xpc_part, "waiting for remote "
+ "partitions to deactivate, timeout in "
+ "%ld seconds\n", (disengage_timeout -
+ jiffies) / HZ);
+ printmsg_time = jiffies +
+ (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
+ printed_waiting_msg = 1;
+ }
+
+ } else if (active_part_count > 0) {
+ if (printed_waiting_msg) {
+ dev_info(xpc_part, "waiting for local partition"
+ " to deactivate\n");
+ printed_waiting_msg = 0;
+ }
+
+ } else {
+ if (!xpc_disengage_timedout) {
+ dev_info(xpc_part, "all partitions have "
+ "deactivated\n");
+ }
+ break;
+ }
+
+		/* sleep for about 1/3 of a second */
+ (void)msleep_interruptible(300);
+
+ } while (1);
+
+ DBUG_ON(xpc_arch_ops.any_partition_engaged());
+
+ xpc_teardown_rsvd_page();
+
+ if (reason == xpUnloading) {
+ (void)unregister_die_notifier(&xpc_die_notifier);
+ (void)unregister_reboot_notifier(&xpc_reboot_notifier);
+ }
+
+ /* clear the interface to XPC's functions */
+ xpc_clear_interface();
+
+ if (xpc_sysctl)
+ unregister_sysctl_table(xpc_sysctl);
+
+ xpc_teardown_partitions();
+
+ if (is_shub())
+ xpc_exit_sn2();
+ else if (is_uv())
+ xpc_exit_uv();
+}
+
+/*
+ * This function is called when the system is being rebooted.
+ */
+static int
+xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+{
+ enum xp_retval reason;
+
+ switch (event) {
+ case SYS_RESTART:
+ reason = xpSystemReboot;
+ break;
+ case SYS_HALT:
+ reason = xpSystemHalt;
+ break;
+ case SYS_POWER_OFF:
+ reason = xpSystemPoweroff;
+ break;
+ default:
+ reason = xpSystemGoingDown;
+ }
+
+ xpc_do_exit(reason);
+ return NOTIFY_DONE;
+}
+
+/*
+ * Notify other partitions to deactivate from us by first disengaging from all
+ * references to our memory.
+ */
+static void
+xpc_die_deactivate(void)
+{
+ struct xpc_partition *part;
+ short partid;
+ int any_engaged;
+ long keep_waiting;
+ long wait_to_print;
+
+ /* keep xpc_hb_checker thread from doing anything (just in case) */
+ xpc_exiting = 1;
+
+	xpc_arch_ops.disallow_all_hbs();	/* indicate we're deactivated */
+
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (xpc_arch_ops.partition_engaged(partid) ||
+ part->act_state != XPC_P_AS_INACTIVE) {
+ xpc_arch_ops.request_partition_deactivation(part);
+ xpc_arch_ops.indicate_partition_disengaged(part);
+ }
+ }
+
+ /*
+ * Though we requested that all other partitions deactivate from us,
+ * we only wait until they've all disengaged or we've reached the
+ * defined timelimit.
+ *
+ * Given that one iteration through the following while-loop takes
+ * approximately 200 microseconds, calculate the #of loops to take
+ * before bailing and the #of loops before printing a waiting message.
+ */
+ keep_waiting = xpc_disengage_timelimit * 1000 * 5;
+ wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
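+	/* i.e. 1 s / 200 us per loop = 5000 = 1000 * 5 iterations per second */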
+
+ while (1) {
+ any_engaged = xpc_arch_ops.any_partition_engaged();
+ if (!any_engaged) {
+ dev_info(xpc_part, "all partitions have deactivated\n");
+ break;
+ }
+
+ if (!keep_waiting--) {
+ for (partid = 0; partid < xp_max_npartitions;
+ partid++) {
+ if (xpc_arch_ops.partition_engaged(partid)) {
+ dev_info(xpc_part, "deactivate from "
+ "remote partition %d timed "
+ "out\n", partid);
+ }
+ }
+ break;
+ }
+
+ if (!wait_to_print--) {
+ dev_info(xpc_part, "waiting for remote partitions to "
+ "deactivate, timeout in %ld seconds\n",
+ keep_waiting / (1000 * 5));
+ wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
+ 1000 * 5;
+ }
+
+ udelay(200);
+ }
+}
+
+/*
+ * This function is called when the system is being restarted or halted due
+ * to some sort of system failure. If this is the case we need to notify the
+ * other partitions to disengage from all references to our memory.
+ * This function can also be called when our heartbeater could be offlined
+ * for a time. In this case we need to notify other partitions to not worry
+ * about the lack of a heartbeat.
+ */
+static int
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+{
+#ifdef CONFIG_IA64 /* !!! temporary kludge */
+ switch (event) {
+ case DIE_MACHINE_RESTART:
+ case DIE_MACHINE_HALT:
+ xpc_die_deactivate();
+ break;
+
+ case DIE_KDEBUG_ENTER:
+ /* Should lack of heartbeat be ignored by other partitions? */
+ if (!xpc_kdebug_ignore)
+ break;
+
+ /* fall through */
+ case DIE_MCA_MONARCH_ENTER:
+ case DIE_INIT_MONARCH_ENTER:
+ xpc_arch_ops.offline_heartbeat();
+ break;
+
+ case DIE_KDEBUG_LEAVE:
+ /* Is lack of heartbeat being ignored by other partitions? */
+ if (!xpc_kdebug_ignore)
+ break;
+
+ /* fall through */
+ case DIE_MCA_MONARCH_LEAVE:
+ case DIE_INIT_MONARCH_LEAVE:
+ xpc_arch_ops.online_heartbeat();
+ break;
+ }
+#else
+ xpc_die_deactivate();
+#endif
+
+ return NOTIFY_DONE;
+}
+
+int __init
+xpc_init(void)
+{
+ int ret;
+ struct task_struct *kthread;
+
+ dev_set_name(xpc_part, "part");
+ dev_set_name(xpc_chan, "chan");
+
+ if (is_shub()) {
+ /*
+ * The ia64-sn2 architecture supports at most 64 partitions.
+ * And the inability to unregister remote amos restricts us
+ * further to supporting exactly 64 partitions on this
+ * architecture, no more and no fewer.
+ */
+ if (xp_max_npartitions != 64) {
+ dev_err(xpc_part, "max #of partitions not set to 64\n");
+ ret = -EINVAL;
+ } else {
+ ret = xpc_init_sn2();
+ }
+
+ } else if (is_uv()) {
+ ret = xpc_init_uv();
+
+ } else {
+ ret = -ENODEV;
+ }
+
+ if (ret != 0)
+ return ret;
+
+ ret = xpc_setup_partitions();
+ if (ret != 0) {
+ dev_err(xpc_part, "can't get memory for partition structure\n");
+ goto out_1;
+ }
+
+ xpc_sysctl = register_sysctl_table(xpc_sys_dir);
+
+ /*
+ * Fill the partition reserved page with the information needed by
+ * other partitions to discover we are alive and establish initial
+ * communications.
+ */
+ ret = xpc_setup_rsvd_page();
+ if (ret != 0) {
+ dev_err(xpc_part, "can't setup our reserved page\n");
+ goto out_2;
+ }
+
+ /* add ourselves to the reboot_notifier_list */
+ ret = register_reboot_notifier(&xpc_reboot_notifier);
+ if (ret != 0)
+ dev_warn(xpc_part, "can't register reboot notifier\n");
+
+ /* add ourselves to the die_notifier list */
+ ret = register_die_notifier(&xpc_die_notifier);
+ if (ret != 0)
+ dev_warn(xpc_part, "can't register die notifier\n");
+
+ /*
+ * The real work-horse behind xpc. This processes incoming
+ * interrupts and monitors remote heartbeats.
+ */
+ kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
+ if (IS_ERR(kthread)) {
+ dev_err(xpc_part, "failed while forking hb check thread\n");
+ ret = -EBUSY;
+ goto out_3;
+ }
+
+ /*
+ * Start up a thread that will attempt to discover other partitions to
+ * activate based on info provided by SAL. This new thread is short-lived
+ * and will exit once discovery is complete.
+ */
+ kthread = kthread_run(xpc_initiate_discovery, NULL,
+ XPC_DISCOVERY_THREAD_NAME);
+ if (IS_ERR(kthread)) {
+ dev_err(xpc_part, "failed while forking discovery thread\n");
+
+ /* mark this new thread as a non-starter */
+ complete(&xpc_discovery_exited);
+
+ xpc_do_exit(xpUnloading);
+ return -EBUSY;
+ }
+
+ /* set the interface to point at XPC's functions */
+ xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
+ xpc_initiate_send, xpc_initiate_send_notify,
+ xpc_initiate_received, xpc_initiate_partid_to_nasids);
+
+ return 0;
+
+ /* initialization was not successful */
+out_3:
+ xpc_teardown_rsvd_page();
+
+ (void)unregister_die_notifier(&xpc_die_notifier);
+ (void)unregister_reboot_notifier(&xpc_reboot_notifier);
+out_2:
+ if (xpc_sysctl)
+ unregister_sysctl_table(xpc_sysctl);
+
+ xpc_teardown_partitions();
+out_1:
+ if (is_shub())
+ xpc_exit_sn2();
+ else if (is_uv())
+ xpc_exit_uv();
+ return ret;
+}
+
+module_init(xpc_init);
+
+void __exit
+xpc_exit(void)
+{
+ xpc_do_exit(xpUnloading);
+}
+
+module_exit(xpc_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
+MODULE_LICENSE("GPL");
+
+module_param(xpc_hb_interval, int, 0);
+MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
+ "heartbeat increments.");
+
+module_param(xpc_hb_check_interval, int, 0);
+MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
+ "heartbeat checks.");
+
+module_param(xpc_disengage_timelimit, int, 0);
+MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
+ "for disengage to complete.");
+
+module_param(xpc_kdebug_ignore, int, 0);
+MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
+ "other partitions when dropping into kdebug.");
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
new file mode 100644
index 00000000..6956f7e7
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -0,0 +1,541 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) partition support.
+ *
+ * This is the part of XPC that detects the presence/absence of
+ * other partitions. It provides a heartbeat and monitors the
+ * heartbeats of other partitions.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/hardirq.h>
+#include <linux/slab.h>
+#include "xpc.h"
+#include <asm/uv/uv_hub.h>
+
+/* XPC is exiting flag */
+int xpc_exiting;
+
+/* this partition's reserved page pointers */
+struct xpc_rsvd_page *xpc_rsvd_page;
+static unsigned long *xpc_part_nasids;
+unsigned long *xpc_mach_nasids;
+
+static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */
+int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */
+
+struct xpc_partition *xpc_partitions;
+
+/*
+ * Guarantee that the kmalloc'd memory is cacheline aligned.
+ */
+void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+ /* see if kmalloc will give us cacheline aligned memory by default */
+ *base = kmalloc(size, flags);
+ if (*base == NULL)
+ return NULL;
+
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
+ return *base;
+
+ kfree(*base);
+
+ /* nope, we'll have to do it ourselves */
+ *base = kmalloc(size + L1_CACHE_BYTES, flags);
+ if (*base == NULL)
+ return NULL;
+
+ return (void *)L1_CACHE_ALIGN((u64)*base);
+}
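+
+/*
+ * Minimal usage sketch (illustrative only): access memory through the
+ * returned aligned pointer, but pass the returned base pointer to kfree()
+ * when done:
+ *
+ *   void *base;
+ *   void *buf = xpc_kmalloc_cacheline_aligned(len, GFP_KERNEL, &base);
+ *   ...
+ *   kfree(base);
+ */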
+
+/*
+ * Given a nasid, get the physical address of the partition's reserved page
+ * for that nasid. This function returns 0 on any error.
+ */
+static unsigned long
+xpc_get_rsvd_page_pa(int nasid)
+{
+ enum xp_retval ret;
+ u64 cookie = 0;
+ unsigned long rp_pa = nasid; /* seed with nasid */
+ size_t len = 0;
+ size_t buf_len = 0;
+ void *buf = NULL;
+ void *buf_base = NULL;
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *) =
+ xpc_arch_ops.get_partition_rsvd_page_pa;
+
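+ /*
+ * SAL may need multiple passes: as long as it returns xpNeedMoreInfo
+ * the buffer below is grown to the requested len and the call retried.
+ */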
+ while (1) {
+
+ /* !!! rp_pa will need to be _gpa on UV.
+ * ??? So do we save it into the architecture specific parts
+ * ??? of the xpc_partition structure? Do we rename this
+ * ??? function or have two versions? Rename rp_pa for UV to
+ * ??? rp_gpa?
+ */
+ ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
+
+ dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
+ "address=0x%016lx, len=0x%016lx\n", ret,
+ (unsigned long)cookie, rp_pa, len);
+
+ if (ret != xpNeedMoreInfo)
+ break;
+
+ /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
+ if (is_shub())
+ len = L1_CACHE_ALIGN(len);
+
+ if (len > buf_len) {
+ kfree(buf_base); /* kfree(NULL) is a no-op */
+ buf_len = L1_CACHE_ALIGN(len);
+ buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
+ &buf_base);
+ if (buf_base == NULL) {
+ dev_err(xpc_part, "unable to kmalloc "
+ "len=0x%016lx\n", buf_len);
+ ret = xpNoMemory;
+ break;
+ }
+ }
+
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
+ break;
+ }
+ }
+
+ kfree(buf_base);
+
+ if (ret != xpSuccess)
+ rp_pa = 0;
+
+ dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
+ return rp_pa;
+}
+
+/*
+ * Fill the partition reserved page with the information needed by
+ * other partitions to discover we are alive and establish initial
+ * communications.
+ */
+int
+xpc_setup_rsvd_page(void)
+{
+ int ret;
+ struct xpc_rsvd_page *rp;
+ unsigned long rp_pa;
+ unsigned long new_ts_jiffies;
+
+ /* get the local reserved page's address */
+
+ preempt_disable();
+ rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
+ preempt_enable();
+ if (rp_pa == 0) {
+ dev_err(xpc_part, "SAL failed to locate the reserved page\n");
+ return -ESRCH;
+ }
+ rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
+
+ if (rp->SAL_version < 3) {
+ /* SAL_versions < 3 had a SAL_partid defined as a u8 */
+ rp->SAL_partid &= 0xff;
+ }
+ BUG_ON(rp->SAL_partid != xp_partition_id);
+
+ if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
+ dev_err(xpc_part, "the reserved page's partid of %d is outside "
+ "supported range (< 0 || >= %d)\n", rp->SAL_partid,
+ xp_max_npartitions);
+ return -EINVAL;
+ }
+
+ rp->version = XPC_RP_VERSION;
+ rp->max_npartitions = xp_max_npartitions;
+
+ /* establish the actual sizes of the nasid masks */
+ if (rp->SAL_version == 1) {
+ /* SAL_version 1 didn't set the nasids_size field */
+ rp->SAL_nasids_size = 128;
+ }
+ xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
+ xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
+ BITS_PER_BYTE);
+
+ /* setup the pointers to the various items in the reserved page */
+ xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
+ xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
+
+ ret = xpc_arch_ops.setup_rsvd_page(rp);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Set timestamp of when reserved page was setup by XPC.
+ * This signifies to the remote partition that our reserved
+ * page is initialized.
+ */
+ new_ts_jiffies = jiffies;
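+ /* ensure the new stamp is nonzero and differs from the previous one */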
+ if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
+ new_ts_jiffies++;
+ rp->ts_jiffies = new_ts_jiffies;
+
+ xpc_rsvd_page = rp;
+ return 0;
+}
+
+void
+xpc_teardown_rsvd_page(void)
+{
+ /* a zero timestamp indicates our rsvd page is not initialized */
+ xpc_rsvd_page->ts_jiffies = 0;
+}
+
+/*
+ * Get a copy of a portion of the remote partition's rsvd page.
+ *
+ * remote_rp points to a buffer that is cacheline aligned for BTE copies and
+ * is large enough to contain a copy of their reserved page header and
+ * part_nasids mask.
+ */
+enum xp_retval
+xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
+ struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
+{
+ int l;
+ enum xp_retval ret;
+
+ /* get the reserved page's physical address */
+
+ *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
+ if (*remote_rp_pa == 0)
+ return xpNoRsvdPageAddr;
+
+ /* pull over the reserved page header and part_nasids mask */
+ ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
+ XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
+ if (ret != xpSuccess)
+ return ret;
+
+ if (discovered_nasids != NULL) {
+ unsigned long *remote_part_nasids =
+ XPC_RP_PART_NASIDS(remote_rp);
+
+ for (l = 0; l < xpc_nasid_mask_nlongs; l++)
+ discovered_nasids[l] |= remote_part_nasids[l];
+ }
+
+ /* zero timestamp indicates the reserved page has not been setup */
+ if (remote_rp->ts_jiffies == 0)
+ return xpRsvdPageNotSet;
+
+ if (XPC_VERSION_MAJOR(remote_rp->version) !=
+ XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+ return xpBadVersion;
+ }
+
+ /* check that both remote and local partids are valid for each side */
+ if (remote_rp->SAL_partid < 0 ||
+ remote_rp->SAL_partid >= xp_max_npartitions ||
+ remote_rp->max_npartitions <= xp_partition_id) {
+ return xpInvalidPartid;
+ }
+
+ if (remote_rp->SAL_partid == xp_partition_id)
+ return xpLocalPartid;
+
+ return xpSuccess;
+}
+
+/*
+ * See if the other side has responded to a partition deactivate request
+ * from us. Though we requested the remote partition to deactivate with regard
+ * to us, we really only need to wait for the other side to disengage from us.
+ */
+int
+xpc_partition_disengaged(struct xpc_partition *part)
+{
+ short partid = XPC_PARTID(part);
+ int disengaged;
+
+ disengaged = !xpc_arch_ops.partition_engaged(partid);
+ if (part->disengage_timeout) {
+ if (!disengaged) {
+ if (time_is_after_jiffies(part->disengage_timeout)) {
+ /* timelimit hasn't been reached yet */
+ return 0;
+ }
+
+ /*
+ * Other side hasn't responded to our deactivate
+ * request in a timely fashion, so assume it's dead.
+ */
+
+ dev_info(xpc_part, "deactivate request to remote "
+ "partition %d timed out\n", partid);
+ xpc_disengage_timedout = 1;
+ xpc_arch_ops.assume_partition_disengaged(partid);
+ disengaged = 1;
+ }
+ part->disengage_timeout = 0;
+
+ /* cancel the timer function, provided it's not us */
+ if (!in_interrupt())
+ del_singleshot_timer_sync(&part->disengage_timer);
+
+ DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
+ part->act_state != XPC_P_AS_INACTIVE);
+ if (part->act_state != XPC_P_AS_INACTIVE)
+ xpc_wakeup_channel_mgr(part);
+
+ xpc_arch_ops.cancel_partition_deactivation_request(part);
+ }
+ return disengaged;
+}
+
+/*
+ * Mark specified partition as active.
+ */
+enum xp_retval
+xpc_mark_partition_active(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ enum xp_retval ret;
+
+ dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ if (part->act_state == XPC_P_AS_ACTIVATING) {
+ part->act_state = XPC_P_AS_ACTIVE;
+ ret = xpSuccess;
+ } else {
+ DBUG_ON(part->reason == xpSuccess);
+ ret = part->reason;
+ }
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ return ret;
+}
+
+/*
+ * Start the process of deactivating the specified partition.
+ */
+void
+xpc_deactivate_partition(const int line, struct xpc_partition *part,
+ enum xp_retval reason)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+
+ if (part->act_state == XPC_P_AS_INACTIVE) {
+ XPC_SET_REASON(part, reason, line);
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ if (reason == xpReactivating) {
+ /* we interrupt ourselves to reactivate partition */
+ xpc_arch_ops.request_partition_reactivation(part);
+ }
+ return;
+ }
+ if (part->act_state == XPC_P_AS_DEACTIVATING) {
+ if ((part->reason == xpUnloading && reason != xpUnloading) ||
+ reason == xpReactivating) {
+ XPC_SET_REASON(part, reason, line);
+ }
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ return;
+ }
+
+ part->act_state = XPC_P_AS_DEACTIVATING;
+ XPC_SET_REASON(part, reason, line);
+
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ /* ask remote partition to deactivate with regard to us */
+ xpc_arch_ops.request_partition_deactivation(part);
+
+ /* set a timelimit on the disengage phase of the deactivation request */
+ part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
+ part->disengage_timer.expires = part->disengage_timeout;
+ add_timer(&part->disengage_timer);
+
+ dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
+ XPC_PARTID(part), reason);
+
+ xpc_partition_going_down(part, reason);
+}
+
+/*
+ * Mark specified partition as inactive.
+ */
+void
+xpc_mark_partition_inactive(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+
+ dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
+ XPC_PARTID(part));
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ part->act_state = XPC_P_AS_INACTIVE;
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ part->remote_rp_pa = 0;
+}
+
+/*
+ * SAL has provided a partition and machine mask. The partition mask
+ * contains a bit for each even nasid in our partition. The machine
+ * mask contains a bit for each even nasid in the entire machine.
+ *
+ * Using those two bit arrays, we can determine which nasids are
+ * known in the machine. Each should also have a reserved page
+ * initialized if they are available for partitioning.
+ */
+void
+xpc_discovery(void)
+{
+ void *remote_rp_base;
+ struct xpc_rsvd_page *remote_rp;
+ unsigned long remote_rp_pa;
+ int region;
+ int region_size;
+ int max_regions;
+ int nasid;
+ struct xpc_rsvd_page *rp;
+ unsigned long *discovered_nasids;
+ enum xp_retval ret;
+
+ remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
+ xpc_nasid_mask_nbytes,
+ GFP_KERNEL, &remote_rp_base);
+ if (remote_rp == NULL)
+ return;
+
+ discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
+ GFP_KERNEL);
+ if (discovered_nasids == NULL) {
+ kfree(remote_rp_base);
+ return;
+ }
+
+ rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
+
+ /*
+ * The term 'region' in this context refers to the minimum number of
+ * nodes that can comprise an access protection grouping. The access
+ * protection applies to memory, IOI and IPI.
+ */
+ region_size = xp_region_size;
+
+ if (is_uv())
+ max_regions = 256;
+ else {
+ max_regions = 64;
+
+ switch (region_size) {
+ case 128:
+ max_regions *= 2;
+ /* fall through */
+ case 64:
+ max_regions *= 2;
+ /* fall through */
+ case 32:
+ max_regions *= 2;
+ region_size = 16;
+ DBUG_ON(!is_shub2());
+ }
+ }
+
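+ /*
+ * Illustrative numbers (derived from the loop bounds below): with a
+ * region_size of 16, region 3 covers the even nasids 96, 98, ..., 126.
+ */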
+ for (region = 0; region < max_regions; region++) {
+
+ if (xpc_exiting)
+ break;
+
+ dev_dbg(xpc_part, "searching region %d\n", region);
+
+ for (nasid = (region * region_size * 2);
+ nasid < ((region + 1) * region_size * 2); nasid += 2) {
+
+ if (xpc_exiting)
+ break;
+
+ dev_dbg(xpc_part, "checking nasid %d\n", nasid);
+
+ if (test_bit(nasid / 2, xpc_part_nasids)) {
+ dev_dbg(xpc_part, "PROM indicates Nasid %d is "
+ "part of the local partition; skipping "
+ "region\n", nasid);
+ break;
+ }
+
+ if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
+ dev_dbg(xpc_part, "PROM indicates Nasid %d was "
+ "not on Numa-Link network at reset\n",
+ nasid);
+ continue;
+ }
+
+ if (test_bit(nasid / 2, discovered_nasids)) {
+ dev_dbg(xpc_part, "Nasid %d is part of a "
+ "partition which was previously "
+ "discovered\n", nasid);
+ continue;
+ }
+
+ /* pull over the rsvd page header & part_nasids mask */
+
+ ret = xpc_get_remote_rp(nasid, discovered_nasids,
+ remote_rp, &remote_rp_pa);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_part, "unable to get reserved page "
+ "from nasid %d, reason=%d\n", nasid,
+ ret);
+
+ if (ret == xpLocalPartid)
+ break;
+
+ continue;
+ }
+
+ xpc_arch_ops.request_partition_activation(remote_rp,
+ remote_rp_pa, nasid);
+ }
+ }
+
+ kfree(discovered_nasids);
+ kfree(remote_rp_base);
+}
+
+/*
+ * Given a partid, get the nasids owned by that partition from the
+ * remote partition's reserved page.
+ */
+enum xp_retval
+xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
+{
+ struct xpc_partition *part;
+ unsigned long part_nasid_pa;
+
+ part = &xpc_partitions[partid];
+ if (part->remote_rp_pa == 0)
+ return xpPartitionDown;
+
+ memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
+
+ part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
+
+ return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
+ xpc_nasid_mask_nbytes);
+}
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
new file mode 100644
index 00000000..7d71c04f
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -0,0 +1,2462 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) sn2-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/uncached.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/sn_sal.h>
+#include "xpc.h"
+
+/*
+ * Define the number of u64s required to represent all the C-brick nasids
+ * as a bitmap. The cross-partition kernel modules deal only with
+ * C-brick nasids, thus the need for bitmaps which don't account for
+ * odd-numbered (non C-brick) nasids.
+ */
+#define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2)
+#define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
+#define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
+
+/*
+ * Memory for XPC's amo variables is allocated by the MSPEC driver. These
+ * pages are located in the lowest granule. The lowest granule uses 4k pages
+ * for cached references and an alternate TLB handler to never provide a
+ * cacheable mapping for the entire region. This will prevent speculative
+ * reading of cached copies of our lines from being issued which will cause
+ * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
+ * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
+ * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify
+ * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote
+ * partitions (i.e., XPCs) consider themselves currently engaged with the
+ * local XPC and 1 amo variable to request partition deactivation.
+ */
+#define XPC_NOTIFY_IRQ_AMOS_SN2 0
+#define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \
+ XP_MAX_NPARTITIONS_SN2)
+#define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \
+ XP_NASID_MASK_WORDS_SN2)
+#define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
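+
+/*
+ * Resulting index layout (derived from the counts cited above): amos 0-63
+ * identify NOTIFY IRQ senders, amos 64-191 identify ACTIVATE IRQ senders,
+ * amo 192 tracks engaged partitions and amo 193 holds deactivate requests.
+ */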
+
+/*
+ * Buffer used to store a local copy of portions of a remote partition's
+ * reserved page (either its header and part_nasids mask, or its vars).
+ */
+static void *xpc_remote_copy_buffer_base_sn2;
+static char *xpc_remote_copy_buffer_sn2;
+
+static struct xpc_vars_sn2 *xpc_vars_sn2;
+static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
+
+static int
+xpc_setup_partitions_sn2(void)
+{
+ /* nothing needs to be done */
+ return 0;
+}
+
+static void
+xpc_teardown_partitions_sn2(void)
+{
+ /* nothing needs to be done */
+}
+
+/* SH_IPI_ACCESS shub register value on startup */
+static u64 xpc_sh1_IPI_access_sn2;
+static u64 xpc_sh2_IPI_access0_sn2;
+static u64 xpc_sh2_IPI_access1_sn2;
+static u64 xpc_sh2_IPI_access2_sn2;
+static u64 xpc_sh2_IPI_access3_sn2;
+
+/*
+ * Change protections to allow IPI operations.
+ */
+static void
+xpc_allow_IPI_ops_sn2(void)
+{
+ int node;
+ int nasid;
+
+ /* !!! The following should get moved into SAL. */
+ if (is_shub2()) {
+ xpc_sh2_IPI_access0_sn2 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+ xpc_sh2_IPI_access1_sn2 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+ xpc_sh2_IPI_access2_sn2 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+ xpc_sh2_IPI_access3_sn2 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+ -1UL);
+ }
+ } else {
+ xpc_sh1_IPI_access_sn2 =
+ (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+ -1UL);
+ }
+ }
+}
+
+/*
+ * Restrict protections to disallow IPI operations.
+ */
+static void
+xpc_disallow_IPI_ops_sn2(void)
+{
+ int node;
+ int nasid;
+
+ /* !!! The following should get moved into SAL. */
+ if (is_shub2()) {
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+ xpc_sh2_IPI_access0_sn2);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+ xpc_sh2_IPI_access1_sn2);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+ xpc_sh2_IPI_access2_sn2);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+ xpc_sh2_IPI_access3_sn2);
+ }
+ } else {
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+ xpc_sh1_IPI_access_sn2);
+ }
+ }
+}
+
+/*
+ * The following set of functions are used for the sending and receiving of
+ * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
+ * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
+ * is associated with channel activity (SGI_XPC_NOTIFY).
+ */
+
+static u64
+xpc_receive_IRQ_amo_sn2(struct amo *amo)
+{
+ return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
+}
+
+static enum xp_retval
+xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
+ int vector)
+{
+ int ret = 0;
+ unsigned long irq_flags;
+
+ local_irq_save(irq_flags);
+
+ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
+ sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
+
+ /*
+ * We must always use the nofault function regardless of whether we
+ * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+ * didn't, we'd never know that the other partition is down and would
+ * keep sending IRQs and amos to it until the heartbeat times out.
+ */
+ ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
+ xp_nofault_PIOR_target));
+
+ local_irq_restore(irq_flags);
+
+ return (ret == 0) ? xpSuccess : xpPioReadError;
+}
+
+static struct amo *
+xpc_init_IRQ_amo_sn2(int index)
+{
+ struct amo *amo = xpc_vars_sn2->amos_page + index;
+
+ (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */
+ return amo;
+}
+
+/*
+ * Functions associated with SGI_XPC_ACTIVATE IRQ.
+ */
+
+/*
+ * Notify the heartbeat check thread that an activate IRQ has been received.
+ */
+static irqreturn_t
+xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ xpc_activate_IRQ_rcvd++;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Flag the appropriate amo variable and send an IRQ to the specified node.
+ */
+static void
+xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
+ int to_nasid, int to_phys_cpuid)
+{
+ struct amo *amos = (struct amo *)__va(amos_page_pa +
+ (XPC_ACTIVATE_IRQ_AMOS_SN2 *
+ sizeof(struct amo)));
+
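+ /* even (C-brick) nasids map to bit nasid/2 of the activate amo array */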
+ (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
+ BIT_MASK(from_nasid / 2), to_nasid,
+ to_phys_cpuid, SGI_XPC_ACTIVATE);
+}
+
+static void
+xpc_send_local_activate_IRQ_sn2(int from_nasid)
+{
+ unsigned long irq_flags;
+ struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
+ (XPC_ACTIVATE_IRQ_AMOS_SN2 *
+ sizeof(struct amo)));
+
+ /* fake the sending and receipt of an activate IRQ from remote nasid */
+ FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
+ FETCHOP_OR, BIT_MASK(from_nasid / 2));
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ xpc_activate_IRQ_rcvd++;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+}
+
+/*
+ * Functions associated with SGI_XPC_NOTIFY IRQ.
+ */
+
+/*
+ * Check to see if any chctl flags were sent from the specified partition.
+ */
+static void
+xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
+{
+ union xpc_channel_ctl_flags chctl;
+ unsigned long irq_flags;
+
+ chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
+ local_chctl_amo_va);
+ if (chctl.all_flags == 0)
+ return;
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.all_flags |= chctl.all_flags;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
+ "0x%llx\n", XPC_PARTID(part), chctl.all_flags);
+
+ xpc_wakeup_channel_mgr(part);
+}
+
+/*
+ * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
+ * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
+ * than one partition, we use an amo structure per partition to indicate
+ * whether a partition has sent an IRQ or not. If it has, then wake up the
+ * associated kthread to handle it.
+ *
+ * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
+ * running on other partitions.
+ *
+ * Noteworthy Arguments:
+ *
+ * irq - Interrupt ReQuest number. NOT USED.
+ *
+ * dev_id - partid of IRQ's potential sender.
+ */
+static irqreturn_t
+xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
+{
+ short partid = (short)(u64)dev_id;
+ struct xpc_partition *part = &xpc_partitions[partid];
+
+ DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);
+
+ if (xpc_part_ref(part)) {
+ xpc_check_for_sent_chctl_flags_sn2(part);
+
+ xpc_part_deref(part);
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
+ * because the write to their associated amo variable completed after the IRQ
+ * was received.
+ */
+static void
+xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+
+ if (xpc_part_ref(part)) {
+ xpc_check_for_sent_chctl_flags_sn2(part);
+
+ part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
+ XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
+ add_timer(&part_sn2->dropped_notify_IRQ_timer);
+ xpc_part_deref(part);
+ }
+}
+
+/*
+ * Send a notify IRQ to the remote partition that is associated with the
+ * specified channel.
+ */
+static void
+xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
+ char *chctl_flag_string, unsigned long *irq_flags)
+{
+ struct xpc_partition *part = &xpc_partitions[ch->partid];
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ union xpc_channel_ctl_flags chctl = { 0 };
+ enum xp_retval ret;
+
+ if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
+ chctl.flags[ch->number] = chctl_flag;
+ ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
+ chctl.all_flags,
+ part_sn2->notify_IRQ_nasid,
+ part_sn2->notify_IRQ_phys_cpuid,
+ SGI_XPC_NOTIFY);
+ dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
+ chctl_flag_string, ch->partid, ch->number, ret);
+ if (unlikely(ret != xpSuccess)) {
+ if (irq_flags != NULL)
+ spin_unlock_irqrestore(&ch->lock, *irq_flags);
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ if (irq_flags != NULL)
+ spin_lock_irqsave(&ch->lock, *irq_flags);
+ }
+ }
+}
+
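+/*
+ * Convenience wrapper: stringifies the chctl flag so the dev_dbg() in
+ * xpc_send_notify_IRQ_sn2() can report which flag was sent.
+ */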
+#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
+ xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
+
+/*
+ * Make it look like the remote partition, which is associated with the
+ * specified channel, sent us a notify IRQ. This faked IRQ will be handled
+ * by xpc_check_for_dropped_notify_IRQ_sn2().
+ */
+static void
+xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
+ char *chctl_flag_string)
+{
+ struct xpc_partition *part = &xpc_partitions[ch->partid];
+ union xpc_channel_ctl_flags chctl = { 0 };
+
+ chctl.flags[ch->number] = chctl_flag;
+ FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
+ variable), FETCHOP_OR, chctl.all_flags);
+ dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
+ chctl_flag_string, ch->partid, ch->number);
+}
+
+#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
+ xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
+
+static void
+xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
+ unsigned long *irq_flags)
+{
+ struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
+
+ args->reason = ch->reason;
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
+}
+
+static void
+xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
+}
+
+static void
+xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
+
+ args->entry_size = ch->entry_size;
+ args->local_nentries = ch->local_nentries;
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
+}
+
+static void
+xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
+
+ args->remote_nentries = ch->remote_nentries;
+ args->local_nentries = ch->local_nentries;
+ args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
+}
+
+static void
+xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
+ unsigned long *irq_flags)
+{
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
+}
+
+static void
+xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
+{
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
+}
+
+static void
+xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
+{
+ XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
+}
+
+static enum xp_retval
+xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
+ unsigned long msgqueue_pa)
+{
+ ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
+ return xpSuccess;
+}
+
+/*
+ * This next set of functions are used to keep track of when a partition is
+ * potentially engaged in accessing memory belonging to another partition.
+ */
+
+static void
+xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
+ (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
+ sizeof(struct amo)));
+
+ local_irq_save(irq_flags);
+
+ /* set bit corresponding to our partid in remote partition's amo */
+ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
+ BIT(sn_partition_id));
+
+ /*
+ * We must always use the nofault function regardless of whether we
+ * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+ * didn't, we'd never know that the other partition is down and would
+ * keep sending IRQs and amos to it until the heartbeat times out.
+ */
+ (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+ variable),
+ xp_nofault_PIOR_target));
+
+ local_irq_restore(irq_flags);
+}
+
+static void
+xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ unsigned long irq_flags;
+ struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
+ (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
+ sizeof(struct amo)));
+
+ local_irq_save(irq_flags);
+
+ /* clear bit corresponding to our partid in remote partition's amo */
+ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+ ~BIT(sn_partition_id));
+
+ /*
+ * We must always use the nofault function regardless of whether we
+ * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+ * didn't, we'd never know that the other partition is down and would
+ * keep sending IRQs and amos to it until the heartbeat times out.
+ */
+ (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+ variable),
+ xp_nofault_PIOR_target));
+
+ local_irq_restore(irq_flags);
+
+ /*
+ * Send activate IRQ to get other side to see that we've cleared our
+ * bit in their engaged partitions amo.
+ */
+ xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
+ cnodeid_to_nasid(0),
+ part_sn2->activate_IRQ_nasid,
+ part_sn2->activate_IRQ_phys_cpuid);
+}
+
+static void
+xpc_assume_partition_disengaged_sn2(short partid)
+{
+ struct amo *amo = xpc_vars_sn2->amos_page +
+ XPC_ENGAGED_PARTITIONS_AMO_SN2;
+
+ /* clear bit(s) based on partid mask in our partition's amo */
+ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+ ~BIT(partid));
+}
+
+static int
+xpc_partition_engaged_sn2(short partid)
+{
+ struct amo *amo = xpc_vars_sn2->amos_page +
+ XPC_ENGAGED_PARTITIONS_AMO_SN2;
+
+ /* our partition's amo variable ANDed with partid mask */
+ return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
+ BIT(partid)) != 0;
+}
+
+static int
+xpc_any_partition_engaged_sn2(void)
+{
+ struct amo *amo = xpc_vars_sn2->amos_page +
+ XPC_ENGAGED_PARTITIONS_AMO_SN2;
+
+ /* our partition's amo variable */
+ return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
+}
+
+/* original protection values for each node */
+static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
+
+/*
+ * Change protections to allow amo operations on non-Shub 1.1 systems.
+ */
+static enum xp_retval
+xpc_allow_amo_ops_sn2(struct amo *amos_page)
+{
+ enum xp_retval ret = xpSuccess;
+
+ /*
+ * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
+ * collides with memory operations. On those systems we call
+ * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
+ */
+ if (!enable_shub_wars_1_1())
+ ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
+
+ return ret;
+}
+
+/*
+ * Change protections to allow amo operations on Shub 1.1 systems.
+ */
+static void
+xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
+{
+ int node;
+ int nasid;
+
+ if (!enable_shub_wars_1_1())
+ return;
+
+ for_each_online_node(node) {
+ nasid = cnodeid_to_nasid(node);
+ /* save current protection values */
+ xpc_prot_vec_sn2[node] =
+ (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+ /* open up everything */
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+ -1UL);
+ HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+ SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+ -1UL);
+ }
+}
+
+static enum xp_retval
+xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
+ size_t *len)
+{
+ s64 status;
+ enum xp_retval ret;
+
+ status = sn_partition_reserved_page_pa((u64)buf, cookie,
+ (u64 *)rp_pa, (u64 *)len);
+ if (status == SALRET_OK)
+ ret = xpSuccess;
+ else if (status == SALRET_MORE_PASSES)
+ ret = xpNeedMoreInfo;
+ else
+ ret = xpSalError;
+
+ return ret;
+}
+
+static int
+xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
+{
+ struct amo *amos_page;
+ int i;
+ int ret;
+
+ xpc_vars_sn2 = XPC_RP_VARS(rp);
+
+ rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);
+
+ /* vars_part array follows immediately after vars */
+ xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
+ XPC_RP_VARS_SIZE);
+
+ /*
+ * Before clearing xpc_vars_sn2, see if a page of amos had been
+ * previously allocated. If not we'll need to allocate one and set
+ * permissions so that cross-partition amos are allowed.
+ *
+ * The allocated amo page needs MCA reporting to remain disabled after
+ * XPC has unloaded. To make this work, we keep a copy of the pointer
+ * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
+ * which is pointed to by the reserved page, and re-use that saved copy
+ * on subsequent loads of XPC. This amo page is never freed, and its
+ * memory protections are never restricted.
+ */
+ amos_page = xpc_vars_sn2->amos_page;
+ if (amos_page == NULL) {
+ amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
+ if (amos_page == NULL) {
+ dev_err(xpc_part, "can't allocate page of amos\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Open up amo-R/W to the cpu. On Shub 1.1 systems this is instead
+ * done when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
+ */
+ ret = xpc_allow_amo_ops_sn2(amos_page);
+ if (ret != xpSuccess) {
+ dev_err(xpc_part, "can't allow amo operations\n");
+ uncached_free_page(__IA64_UNCACHED_OFFSET |
+ TO_PHYS((u64)amos_page), 1);
+ return -EPERM;
+ }
+ }
+
+ /* clear xpc_vars_sn2 */
+ memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));
+
+ xpc_vars_sn2->version = XPC_V_VERSION;
+ xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
+ xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
+ xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
+ xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
+ xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */
+
+ /* clear xpc_vars_part_sn2 */
+ memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
+ XP_MAX_NPARTITIONS_SN2);
+
+ /* initialize the activate IRQ related amo variables */
+ for (i = 0; i < xpc_nasid_mask_nlongs; i++)
+ (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);
+
+ /* initialize the engaged remote partitions related amo variables */
+ (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
+ (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);
+
+ return 0;
+}
+
+static int
+xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
+{
+ return test_bit(partid, heartbeating_to_mask);
+}
+
+static void
+xpc_allow_hb_sn2(short partid)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
+}
+
+static void
+xpc_disallow_hb_sn2(short partid)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
+}
+
+static void
+xpc_disallow_all_hbs_sn2(void)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
+}
+
+static void
+xpc_increment_heartbeat_sn2(void)
+{
+ xpc_vars_sn2->heartbeat++;
+}
+
+static void
+xpc_offline_heartbeat_sn2(void)
+{
+ xpc_increment_heartbeat_sn2();
+ xpc_vars_sn2->heartbeat_offline = 1;
+}
+
+static void
+xpc_online_heartbeat_sn2(void)
+{
+ xpc_increment_heartbeat_sn2();
+ xpc_vars_sn2->heartbeat_offline = 0;
+}
+
+static void
+xpc_heartbeat_init_sn2(void)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+
+ bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
+ xpc_online_heartbeat_sn2();
+}
+
+static void
+xpc_heartbeat_exit_sn2(void)
+{
+ xpc_offline_heartbeat_sn2();
+}
+
+static enum xp_retval
+xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
+{
+ struct xpc_vars_sn2 *remote_vars;
+ enum xp_retval ret;
+
+ remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
+
+ /* pull the remote vars structure that contains the heartbeat */
+ ret = xp_remote_memcpy(xp_pa(remote_vars),
+ part->sn.sn2.remote_vars_pa,
+ XPC_RP_VARS_SIZE);
+ if (ret != xpSuccess)
+ return ret;
+
+ dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
+ "heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
+ remote_vars->heartbeat, part->last_heartbeat,
+ remote_vars->heartbeat_offline,
+ remote_vars->heartbeating_to_mask[0]);
+
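+ /*
+ * No heartbeat if the count hasn't advanced since our last check while
+ * the remote partition isn't marked offline, or if it is no longer
+ * heartbeating to our partition.
+ */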
+ if ((remote_vars->heartbeat == part->last_heartbeat &&
+ !remote_vars->heartbeat_offline) ||
+ !xpc_hb_allowed_sn2(sn_partition_id,
+ remote_vars->heartbeating_to_mask)) {
+ ret = xpNoHeartbeat;
+ } else {
+ part->last_heartbeat = remote_vars->heartbeat;
+ }
+
+ return ret;
+}
+
+/*
+ * Get a copy of the remote partition's XPC variables from the reserved page.
+ *
+ * remote_vars points to a buffer that is cacheline aligned for BTE copies and
+ * assumed to be of size XPC_RP_VARS_SIZE.
+ */
+static enum xp_retval
+xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
+ struct xpc_vars_sn2 *remote_vars)
+{
+ enum xp_retval ret;
+
+ if (remote_vars_pa == 0)
+ return xpVarsNotSet;
+
+ /* pull over the cross partition variables */
+ ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
+ XPC_RP_VARS_SIZE);
+ if (ret != xpSuccess)
+ return ret;
+
+ if (XPC_VERSION_MAJOR(remote_vars->version) !=
+ XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+ return xpBadVersion;
+ }
+
+ return xpSuccess;
+}
+
+static void
+xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
+ unsigned long remote_rp_pa, int nasid)
+{
+ xpc_send_local_activate_IRQ_sn2(nasid);
+}
+
+static void
+xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
+{
+ xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
+}
+
+static void
+xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ unsigned long irq_flags;
+ struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
+ (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
+ sizeof(struct amo)));
+
+ local_irq_save(irq_flags);
+
+ /* set bit corresponding to our partid in remote partition's amo */
+ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
+ BIT(sn_partition_id));
+
+ /*
+ * We must always use the nofault function regardless of whether we
+ * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+ * didn't, we'd never know that the other partition is down and would
+ * keep sending IRQs and amos to it until the heartbeat times out.
+ */
+ (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+ variable),
+ xp_nofault_PIOR_target));
+
+ local_irq_restore(irq_flags);
+
+ /*
+ * Send activate IRQ to get other side to see that we've set our
+ * bit in their deactivate request amo.
+ */
+ xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
+ cnodeid_to_nasid(0),
+ part_sn2->activate_IRQ_nasid,
+ part_sn2->activate_IRQ_phys_cpuid);
+}
+
+static void
+xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
+ (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
+ sizeof(struct amo)));
+
+ local_irq_save(irq_flags);
+
+ /* clear bit corresponding to our partid in remote partition's amo */
+ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+ ~BIT(sn_partition_id));
+
+ /*
+ * We must always use the nofault function regardless of whether we
+ * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+ * didn't, we'd never know that the other partition is down and would
+ * keep sending IRQs and amos to it until the heartbeat times out.
+ */
+ (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+ variable),
+ xp_nofault_PIOR_target));
+
+ local_irq_restore(irq_flags);
+}
+
+static int
+xpc_partition_deactivation_requested_sn2(short partid)
+{
+ struct amo *amo = xpc_vars_sn2->amos_page +
+ XPC_DEACTIVATE_REQUEST_AMO_SN2;
+
+ /* our partition's amo variable ANDed with partid mask */
+ return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
+ BIT(partid)) != 0;
+}
+
+/*
+ * Update the remote partition's info.
+ */
+static void
+xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
+ unsigned long *remote_rp_ts_jiffies,
+ unsigned long remote_rp_pa,
+ unsigned long remote_vars_pa,
+ struct xpc_vars_sn2 *remote_vars)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+
+ part->remote_rp_version = remote_rp_version;
+ dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
+ part->remote_rp_version);
+
+ part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
+ dev_dbg(xpc_part, " remote_rp_ts_jiffies = 0x%016lx\n",
+ part->remote_rp_ts_jiffies);
+
+ part->remote_rp_pa = remote_rp_pa;
+ dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
+
+ part_sn2->remote_vars_pa = remote_vars_pa;
+ dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
+ part_sn2->remote_vars_pa);
+
+ part->last_heartbeat = remote_vars->heartbeat - 1;
+ dev_dbg(xpc_part, " last_heartbeat = 0x%016llx\n",
+ part->last_heartbeat);
+
+ part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
+ dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
+ part_sn2->remote_vars_part_pa);
+
+ part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
+ dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
+ part_sn2->activate_IRQ_nasid);
+
+ part_sn2->activate_IRQ_phys_cpuid =
+ remote_vars->activate_IRQ_phys_cpuid;
+ dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
+ part_sn2->activate_IRQ_phys_cpuid);
+
+ part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
+ dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
+ part_sn2->remote_amos_page_pa);
+
+ part_sn2->remote_vars_version = remote_vars->version;
+ dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
+ part_sn2->remote_vars_version);
+}
+
+/*
+ * Prior code has determined the nasid which generated an activate IRQ.
+ * Inspect that nasid to determine if its partition needs to be activated
+ * or deactivated.
+ *
+ * A partition is considered "awaiting activation" if our partition
+ * flags indicate it is not active and it has a heartbeat. A
+ * partition is considered "awaiting deactivation" if our partition
+ * flags indicate it is active but it has no heartbeat or it is not
+ * sending its heartbeat to us.
+ *
+ * To determine the heartbeat, the remote nasid must have a properly
+ * initialized reserved page.
+ */
+static void
+xpc_identify_activate_IRQ_req_sn2(int nasid)
+{
+ struct xpc_rsvd_page *remote_rp;
+ struct xpc_vars_sn2 *remote_vars;
+ unsigned long remote_rp_pa;
+ unsigned long remote_vars_pa;
+ int remote_rp_version;
+ int reactivate = 0;
+ unsigned long remote_rp_ts_jiffies = 0;
+ short partid;
+ struct xpc_partition *part;
+ struct xpc_partition_sn2 *part_sn2;
+ enum xp_retval ret;
+
+ /* pull over the reserved page structure */
+
+ remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;
+
+ ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
+ if (ret != xpSuccess) {
+ dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
+ "which sent interrupt, reason=%d\n", nasid, ret);
+ return;
+ }
+
+ remote_vars_pa = remote_rp->sn.sn2.vars_pa;
+ remote_rp_version = remote_rp->version;
+ remote_rp_ts_jiffies = remote_rp->ts_jiffies;
+
+ partid = remote_rp->SAL_partid;
+ part = &xpc_partitions[partid];
+ part_sn2 = &part->sn.sn2;
+
+ /* pull over the cross partition variables */
+
+ remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
+
+ ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
+ if (ret != xpSuccess) {
+ dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
+ "which sent interrupt, reason=%d\n", nasid, ret);
+
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ return;
+ }
+
+ part->activate_IRQ_rcvd++;
+
+ dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
+ "%lld:0x%lx\n", (int)nasid, (int)partid,
+ part->activate_IRQ_rcvd,
+ remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
+
+ if (xpc_partition_disengaged(part) &&
+ part->act_state == XPC_P_AS_INACTIVE) {
+
+ xpc_update_partition_info_sn2(part, remote_rp_version,
+ &remote_rp_ts_jiffies,
+ remote_rp_pa, remote_vars_pa,
+ remote_vars);
+
+ if (xpc_partition_deactivation_requested_sn2(partid)) {
+ /*
+ * Other side is waiting on us to deactivate even though
+ * we already have.
+ */
+ return;
+ }
+
+ xpc_activate_partition(part);
+ return;
+ }
+
+ DBUG_ON(part->remote_rp_version == 0);
+ DBUG_ON(part_sn2->remote_vars_version == 0);
+
+ if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {
+
+ /* the other side rebooted */
+
+ DBUG_ON(xpc_partition_engaged_sn2(partid));
+ DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));
+
+ xpc_update_partition_info_sn2(part, remote_rp_version,
+ &remote_rp_ts_jiffies,
+ remote_rp_pa, remote_vars_pa,
+ remote_vars);
+ reactivate = 1;
+ }
+
+ if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
+ /* still waiting on other side to disengage from us */
+ return;
+ }
+
+ if (reactivate)
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
+ else if (xpc_partition_deactivation_requested_sn2(partid))
+ XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
+}
+
+/*
+ * Loop through the activation amo variables and process any bits
+ * which are set. Each bit indicates a nasid sending a partition
+ * activation or deactivation request.
+ *
+ * Return #of IRQs detected.
+ */
+int
+xpc_identify_activate_IRQ_sender_sn2(void)
+{
+ int l;
+ int b;
+ unsigned long nasid_mask_long;
+ u64 nasid; /* remote nasid */
+ int n_IRQs_detected = 0;
+ struct amo *act_amos;
+
+ act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;
+
+ /* scan through activate amo variables looking for non-zero entries */
+ for (l = 0; l < xpc_nasid_mask_nlongs; l++) {
+
+ if (xpc_exiting)
+ break;
+
+ nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);
+
+ b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
+ if (b >= BITS_PER_LONG) {
+ /* no IRQs from nasids in this amo variable */
+ continue;
+ }
+
+ dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
+ nasid_mask_long);
+
+ /*
+ * If this nasid has been added to the machine since
+ * our partition was reset, this will retain the
+ * remote nasid in our reserved pages machine mask.
+ * This is used in the event of module reload.
+ */
+ xpc_mach_nasids[l] |= nasid_mask_long;
+
+ /* locate the nasid(s) which sent interrupts */
+
+ do {
+ n_IRQs_detected++;
+ nasid = (l * BITS_PER_LONG + b) * 2;
+ dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
+ xpc_identify_activate_IRQ_req_sn2(nasid);
+
+ b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
+ b + 1);
+ } while (b < BITS_PER_LONG);
+ }
+ return n_IRQs_detected;
+}
+
+static void
+xpc_process_activate_IRQ_rcvd_sn2(void)
+{
+ unsigned long irq_flags;
+ int n_IRQs_expected;
+ int n_IRQs_detected;
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ n_IRQs_expected = xpc_activate_IRQ_rcvd;
+ xpc_activate_IRQ_rcvd = 0;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
+ if (n_IRQs_detected < n_IRQs_expected) {
+ /* retry once to help avoid missing amo */
+ (void)xpc_identify_activate_IRQ_sender_sn2();
+ }
+}
+
+/*
+ * Setup the channel structures that are sn2 specific.
+ */
+static enum xp_retval
+xpc_setup_ch_structures_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ struct xpc_channel_sn2 *ch_sn2;
+ enum xp_retval retval;
+ int ret;
+ int cpuid;
+ int ch_number;
+ struct timer_list *timer;
+ short partid = XPC_PARTID(part);
+
+ /* allocate all the required GET/PUT values */
+
+ part_sn2->local_GPs =
+ xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
+ &part_sn2->local_GPs_base);
+ if (part_sn2->local_GPs == NULL) {
+ dev_err(xpc_chan, "can't get memory for local get/put "
+ "values\n");
+ return xpNoMemory;
+ }
+
+ part_sn2->remote_GPs =
+ xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
+ &part_sn2->remote_GPs_base);
+ if (part_sn2->remote_GPs == NULL) {
+ dev_err(xpc_chan, "can't get memory for remote get/put "
+ "values\n");
+ retval = xpNoMemory;
+ goto out_1;
+ }
+
+ part_sn2->remote_GPs_pa = 0;
+
+ /* allocate all the required open and close args */
+
+ part_sn2->local_openclose_args =
+ xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
+ GFP_KERNEL, &part_sn2->
+ local_openclose_args_base);
+ if (part_sn2->local_openclose_args == NULL) {
+ dev_err(xpc_chan, "can't get memory for local connect args\n");
+ retval = xpNoMemory;
+ goto out_2;
+ }
+
+ part_sn2->remote_openclose_args_pa = 0;
+
+ part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
+
+ part_sn2->notify_IRQ_nasid = 0;
+ part_sn2->notify_IRQ_phys_cpuid = 0;
+ part_sn2->remote_chctl_amo_va = NULL;
+
+ sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
+ ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
+ IRQF_SHARED, part_sn2->notify_IRQ_owner,
+ (void *)(u64)partid);
+ if (ret != 0) {
+ dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
+ "errno=%d\n", -ret);
+ retval = xpLackOfResources;
+ goto out_3;
+ }
+
+ /* Setup a timer to check for dropped notify IRQs */
+ timer = &part_sn2->dropped_notify_IRQ_timer;
+ init_timer(timer);
+ timer->function =
+ (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
+ timer->data = (unsigned long)part;
+ timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
+ add_timer(timer);
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch_sn2 = &part->channels[ch_number].sn.sn2;
+
+ ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
+ ch_sn2->local_openclose_args =
+ &part_sn2->local_openclose_args[ch_number];
+
+ mutex_init(&ch_sn2->msg_to_pull_mutex);
+ }
+
+ /*
+ * Setup the per partition specific variables required by the
+ * remote partition to establish channel connections with us.
+ *
+ * The setting of the magic # indicates that these per partition
+ * specific variables are ready to be used.
+ */
+ xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
+ xpc_vars_part_sn2[partid].openclose_args_pa =
+ xp_pa(part_sn2->local_openclose_args);
+ xpc_vars_part_sn2[partid].chctl_amo_pa =
+ xp_pa(part_sn2->local_chctl_amo_va);
+ cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
+ xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
+ xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
+ cpu_physical_id(cpuid);
+ xpc_vars_part_sn2[partid].nchannels = part->nchannels;
+ xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;
+
+ return xpSuccess;
+
+ /* setup of ch structures failed */
+out_3:
+ kfree(part_sn2->local_openclose_args_base);
+ part_sn2->local_openclose_args = NULL;
+out_2:
+ kfree(part_sn2->remote_GPs_base);
+ part_sn2->remote_GPs = NULL;
+out_1:
+ kfree(part_sn2->local_GPs_base);
+ part_sn2->local_GPs = NULL;
+ return retval;
+}
+
+/*
+ * Teardown the channel structures that are sn2 specific.
+ */
+static void
+xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ short partid = XPC_PARTID(part);
+
+ /*
+ * Indicate that the variables specific to the remote partition are no
+ * longer available for its use.
+ */
+ xpc_vars_part_sn2[partid].magic = 0;
+
+ /* in case we've still got outstanding timers registered... */
+ del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
+ free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
+
+ kfree(part_sn2->local_openclose_args_base);
+ part_sn2->local_openclose_args = NULL;
+ kfree(part_sn2->remote_GPs_base);
+ part_sn2->remote_GPs = NULL;
+ kfree(part_sn2->local_GPs_base);
+ part_sn2->local_GPs = NULL;
+ part_sn2->local_chctl_amo_va = NULL;
+}
+
+/*
+ * Create a wrapper that hides the underlying mechanism for pulling a cacheline
+ * (or multiple cachelines) from a remote partition.
+ *
+ * src_pa must be a cacheline aligned physical address on the remote partition.
+ * dst must be a cacheline aligned virtual address on this partition.
+ * cnt must be a multiple of the cacheline size.
+ */
+/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
+static enum xp_retval
+xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
+ const unsigned long src_pa, size_t cnt)
+{
+ enum xp_retval ret;
+
+ DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
+ DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
+ DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
+ return part->reason;
+
+ ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
+ " ret=%d\n", XPC_PARTID(part), ret);
+ }
+ return ret;
+}
+
+/*
+ * Pull the remote per partition specific variables from the specified
+ * partition.
+ */
+static enum xp_retval
+xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ u8 buffer[L1_CACHE_BYTES * 2];
+ struct xpc_vars_part_sn2 *pulled_entry_cacheline =
+ (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
+ struct xpc_vars_part_sn2 *pulled_entry;
+ unsigned long remote_entry_cacheline_pa;
+ unsigned long remote_entry_pa;
+ short partid = XPC_PARTID(part);
+ enum xp_retval ret;
+
+ /* pull the cacheline that contains the variables we're interested in */
+
+ DBUG_ON(part_sn2->remote_vars_part_pa !=
+ L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
+ DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);
+
+ remote_entry_pa = part_sn2->remote_vars_part_pa +
+ sn_partition_id * sizeof(struct xpc_vars_part_sn2);
+
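+ /*
+ * The pull must be cacheline aligned, so round the entry's address
+ * down to its containing cacheline; the entry itself is then located
+ * at the matching offset within the pulled copy.
+ */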
+ remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
+
+ pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
+ + (remote_entry_pa &
+ (L1_CACHE_BYTES - 1)));
+
+ ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
+ remote_entry_cacheline_pa,
+ L1_CACHE_BYTES);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
+ "partition %d, ret=%d\n", partid, ret);
+ return ret;
+ }
+
+ /* see if they've been set up yet */
+
+ if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
+ pulled_entry->magic != XPC_VP_MAGIC2_SN2) {
+
+ if (pulled_entry->magic != 0) {
+ dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
+ "partition %d has bad magic value (=0x%llx)\n",
+ partid, sn_partition_id, pulled_entry->magic);
+ return xpBadMagic;
+ }
+
+ /* they've not been initialized yet */
+ return xpRetry;
+ }
+
+ if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {
+
+ /* validate the variables */
+
+ if (pulled_entry->GPs_pa == 0 ||
+ pulled_entry->openclose_args_pa == 0 ||
+ pulled_entry->chctl_amo_pa == 0) {
+
+ dev_err(xpc_chan, "partition %d's XPC vars_part for "
+ "partition %d are not valid\n", partid,
+ sn_partition_id);
+ return xpInvalidAddress;
+ }
+
+ /* the variables we imported look to be valid */
+
+ part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
+ part_sn2->remote_openclose_args_pa =
+ pulled_entry->openclose_args_pa;
+ part_sn2->remote_chctl_amo_va =
+ (struct amo *)__va(pulled_entry->chctl_amo_pa);
+ part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
+ part_sn2->notify_IRQ_phys_cpuid =
+ pulled_entry->notify_IRQ_phys_cpuid;
+
+ if (part->nchannels > pulled_entry->nchannels)
+ part->nchannels = pulled_entry->nchannels;
+
+ /* let the other side know that we've pulled their variables */
+
+ xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
+ }
+
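+ /*
+ * A remote side still at MAGIC1 has published its variables but has
+ * not yet pulled ours; retry until it advances to MAGIC2.
+ */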
+ if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
+ return xpRetry;
+
+ return xpSuccess;
+}
+
+/*
+ * Establish first contact with the remote partition. This involves pulling
+ * the XPC per partition variables from the remote partition and waiting for
+ * the remote partition to pull ours.
+ */
+static enum xp_retval
+xpc_make_first_contact_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ enum xp_retval ret;
+
+ /*
+ * Register the remote partition's amos with SAL so it can handle
+ * and clean up errors within that address range should the remote
+ * partition go down. We don't unregister this range because it is
+ * difficult to tell when outstanding writes to the remote partition
+ * are finished and thus when it is safe to unregister. This should
+ * not result in wasted space in the SAL xp_addr_region table because
+ * we should get the same page for remote_amos_page_pa after module
+ * reloads and system reboots.
+ */
+ if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
+ PAGE_SIZE, 1) < 0) {
+ dev_warn(xpc_part, "xpc_activating(%d) failed to register "
+ "xp_addr region\n", XPC_PARTID(part));
+
+ ret = xpPhysAddrRegFailed;
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ return ret;
+ }
+
+ /*
+ * Send activate IRQ to get other side to activate if they've not
+ * already begun to do so.
+ */
+ xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
+ cnodeid_to_nasid(0),
+ part_sn2->activate_IRQ_nasid,
+ part_sn2->activate_IRQ_phys_cpuid);
+
+ while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
+ if (ret != xpRetry) {
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ return ret;
+ }
+
+ dev_dbg(xpc_part, "waiting to make first contact with "
+ "partition %d\n", XPC_PARTID(part));
+
+ /* wait a quarter of a second or so */
+ (void)msleep_interruptible(250);
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
+ return part->reason;
+ }
+
+ return xpSuccess;
+}
+
+/*
+ * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
+ */
+static u64
+xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
+{
+ struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ unsigned long irq_flags;
+ union xpc_channel_ctl_flags chctl;
+ enum xp_retval ret;
+
+ /*
+ * See if there are any chctl flags to be handled.
+ */
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ chctl = part->chctl;
+ if (chctl.all_flags != 0)
+ part->chctl.all_flags = 0;
+
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
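+ /*
+ * The flags were snapshotted and cleared atomically under chctl_lock,
+ * so any chctl interrupts arriving from here on will set fresh flags
+ * for the next pass.
+ */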
+ if (xpc_any_openclose_chctl_flags_set(&chctl)) {
+ ret = xpc_pull_remote_cachelines_sn2(part, part->
+ remote_openclose_args,
+ part_sn2->
+ remote_openclose_args_pa,
+ XPC_OPENCLOSE_ARGS_SIZE);
+ if (ret != xpSuccess) {
+ XPC_DEACTIVATE_PARTITION(part, ret);
+
+ dev_dbg(xpc_chan, "failed to pull openclose args from "
+ "partition %d, ret=%d\n", XPC_PARTID(part),
+ ret);
+
+ /* don't bother processing chctl flags anymore */
+ chctl.all_flags = 0;
+ }
+ }
+
+ if (xpc_any_msg_chctl_flags_set(&chctl)) {
+ ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
+ part_sn2->remote_GPs_pa,
+ XPC_GP_SIZE);
+ if (ret != xpSuccess) {
+ XPC_DEACTIVATE_PARTITION(part, ret);
+
+ dev_dbg(xpc_chan, "failed to pull GPs from partition "
+ "%d, ret=%d\n", XPC_PARTID(part), ret);
+
+ /* don't bother processing chctl flags anymore */
+ chctl.all_flags = 0;
+ }
+ }
+
+ return chctl.all_flags;
+}
+
+/*
+ * Allocate the local message queue and the notify queue.
+ */
+static enum xp_retval
+xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ unsigned long irq_flags;
+ int nentries;
+ size_t nbytes;
+
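+ /*
+ * If a full-size allocation fails, retry with progressively fewer
+ * entries; ch->local_nentries is then reduced below to match whatever
+ * size succeeded. (xpc_allocate_remote_msgqueue_sn2() does the same
+ * for the cached remote queue.)
+ */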
+ for (nentries = ch->local_nentries; nentries > 0; nentries--) {
+
+ nbytes = nentries * ch->entry_size;
+ ch_sn2->local_msgqueue =
+ xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
+ &ch_sn2->local_msgqueue_base);
+ if (ch_sn2->local_msgqueue == NULL)
+ continue;
+
+ nbytes = nentries * sizeof(struct xpc_notify_sn2);
+ ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
+ if (ch_sn2->notify_queue == NULL) {
+ kfree(ch_sn2->local_msgqueue_base);
+ ch_sn2->local_msgqueue = NULL;
+ continue;
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (nentries < ch->local_nentries) {
+ dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
+ "partid=%d, channel=%d\n", nentries,
+ ch->local_nentries, ch->partid, ch->number);
+
+ ch->local_nentries = nentries;
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return xpSuccess;
+ }
+
+ dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
+ "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
+ return xpNoMemory;
+}
+
+/*
+ * Allocate the cached remote message queue.
+ */
+static enum xp_retval
+xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ unsigned long irq_flags;
+ int nentries;
+ size_t nbytes;
+
+ DBUG_ON(ch->remote_nentries <= 0);
+
+ for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
+
+ nbytes = nentries * ch->entry_size;
+ ch_sn2->remote_msgqueue =
+ xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->
+ remote_msgqueue_base);
+ if (ch_sn2->remote_msgqueue == NULL)
+ continue;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (nentries < ch->remote_nentries) {
+ dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
+ "partid=%d, channel=%d\n", nentries,
+ ch->remote_nentries, ch->partid, ch->number);
+
+ ch->remote_nentries = nentries;
+ }
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return xpSuccess;
+ }
+
+ dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+ return xpNoMemory;
+}
+
+/*
+ * Allocate message queues and other structures associated with a channel.
+ *
+ * Note: Assumes all of the channel sizes are filled in.
+ */
+static enum xp_retval
+xpc_setup_msg_structures_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ enum xp_retval ret;
+
+ DBUG_ON(ch->flags & XPC_C_SETUP);
+
+ ret = xpc_allocate_local_msgqueue_sn2(ch);
+ if (ret == xpSuccess) {
+
+ ret = xpc_allocate_remote_msgqueue_sn2(ch);
+ if (ret != xpSuccess) {
+ kfree(ch_sn2->local_msgqueue_base);
+ ch_sn2->local_msgqueue = NULL;
+ kfree(ch_sn2->notify_queue);
+ ch_sn2->notify_queue = NULL;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Free up the message queues and other structures that were allocated
+ * for the specified channel.
+ */
+static void
+xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+
+ DBUG_ON(!spin_is_locked(&ch->lock));
+
+ ch_sn2->remote_msgqueue_pa = 0;
+
+ ch_sn2->local_GP->get = 0;
+ ch_sn2->local_GP->put = 0;
+ ch_sn2->remote_GP.get = 0;
+ ch_sn2->remote_GP.put = 0;
+ ch_sn2->w_local_GP.get = 0;
+ ch_sn2->w_local_GP.put = 0;
+ ch_sn2->w_remote_GP.get = 0;
+ ch_sn2->w_remote_GP.put = 0;
+ ch_sn2->next_msg_to_pull = 0;
+
+ if (ch->flags & XPC_C_SETUP) {
+ dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
+ ch->flags, ch->partid, ch->number);
+
+ kfree(ch_sn2->local_msgqueue_base);
+ ch_sn2->local_msgqueue = NULL;
+ kfree(ch_sn2->remote_msgqueue_base);
+ ch_sn2->remote_msgqueue = NULL;
+ kfree(ch_sn2->notify_queue);
+ ch_sn2->notify_queue = NULL;
+ }
+}
+
+/*
+ * Notify those who wanted to be notified upon delivery of their message.
+ */
+static void
+xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
+{
+ struct xpc_notify_sn2 *notify;
+ u8 notify_type;
+ s64 get = ch->sn.sn2.w_remote_GP.get - 1;
+
+ while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
+
+ notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];
+
+ /*
+ * See if the notify entry indicates it was associated with
+ * a message whose sender wants to be notified. It is possible
+ * that it is, but someone else is doing or has done the
+ * notification.
+ */
+ notify_type = notify->type;
+ if (notify_type == 0 ||
+ cmpxchg(&notify->type, notify_type, 0) != notify_type) {
+ continue;
+ }
+
+ DBUG_ON(notify_type != XPC_N_CALL);
+
+ atomic_dec(&ch->n_to_notify);
+
+ if (notify->func != NULL) {
+ dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
+ "msg_number=%lld partid=%d channel=%d\n",
+ (void *)notify, get, ch->partid, ch->number);
+
+ notify->func(reason, ch->partid, ch->number,
+ notify->key);
+
+ dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
+ " msg_number=%lld partid=%d channel=%d\n",
+ (void *)notify, get, ch->partid, ch->number);
+ }
+ }
+}
+
+static void
+xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
+{
+ xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
+}
+
+/*
+ * Clear some of the msg flags in the local message queue.
+ */
+static inline void
+xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg;
+ s64 get;
+
+ get = ch_sn2->w_remote_GP.get;
+ do {
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
+ (get % ch->local_nentries) *
+ ch->entry_size);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
+ msg->flags = 0;
+ } while (++get < ch_sn2->remote_GP.get);
+}
+
+/*
+ * Clear some of the msg flags in the remote message queue.
+ */
+static inline void
+xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg;
+ s64 put, remote_nentries = ch->remote_nentries;
+
+ /* flags are zeroed when the buffer is allocated */
+ if (ch_sn2->remote_GP.put < remote_nentries)
+ return;
+
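+ /* only clear slots that have completed at least one full pass */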
+ put = max(ch_sn2->w_remote_GP.put, remote_nentries);
+ do {
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
+ (put % remote_nentries) *
+ ch->entry_size);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
+ DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
+ DBUG_ON(msg->number != put - remote_nentries);
+ msg->flags = 0;
+ } while (++put < ch_sn2->remote_GP.put);
+}
+
+static int
+xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch)
+{
+ return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
+}
+
+static void
+xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
+{
+ struct xpc_channel *ch = &part->channels[ch_number];
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ int npayloads_sent;
+
+ ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];
+
+ /* See what, if anything, has changed for each connected channel */
+
+ xpc_msgqueue_ref(ch);
+
+ if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
+ ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
+ /* nothing changed since GPs were last pulled */
+ xpc_msgqueue_deref(ch);
+ return;
+ }
+
+ if (!(ch->flags & XPC_C_CONNECTED)) {
+ xpc_msgqueue_deref(ch);
+ return;
+ }
+
+ /*
+ * First check to see if messages recently sent by us have been
+ * received by the other side. (The remote GET value will have
+ * changed since we last looked at it.)
+ */
+
+ if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {
+
+ /*
+ * We need to notify any senders that want to be notified
+ * that their sent messages have been received by their
+ * intended recipients. We need to do this before updating
+ * w_remote_GP.get so that we don't allocate the same message
+ * queue entries prematurely (see xpc_allocate_msg()).
+ */
+ if (atomic_read(&ch->n_to_notify) > 0) {
+ /*
+ * Notify senders that messages sent have been
+ * received and delivered by the other side.
+ */
+ xpc_notify_senders_sn2(ch, xpMsgDelivered,
+ ch_sn2->remote_GP.get);
+ }
+
+ /*
+ * Clear msg->flags in previously sent messages, so that
+ * they're ready for xpc_allocate_msg().
+ */
+ xpc_clear_local_msgqueue_flags_sn2(ch);
+
+ ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
+
+ dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, "
+ "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
+ ch->number);
+
+ /*
+ * If anyone was waiting for message queue entries to become
+ * available, wake them up.
+ */
+ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
+ wake_up(&ch->msg_allocate_wq);
+ }
+
+ /*
+ * Now check for newly sent messages by the other side. (The remote
+ * PUT value will have changed since we last looked at it.)
+ */
+
+ if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
+ /*
+ * Clear msg->flags in previously received messages, so that
+ * they're ready for xpc_get_deliverable_payload_sn2().
+ */
+ xpc_clear_remote_msgqueue_flags_sn2(ch);
+
+ smp_wmb(); /* ensure flags have been cleared before bte_copy */
+ ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
+
+ dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, "
+ "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
+ ch->number);
+
+ npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
+ if (npayloads_sent > 0) {
+ dev_dbg(xpc_chan, "msgs waiting to be copied and "
+ "delivered=%d, partid=%d, channel=%d\n",
+ npayloads_sent, ch->partid, ch->number);
+
+ if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
+ xpc_activate_kthreads(ch, npayloads_sent);
+ }
+ }
+
+ xpc_msgqueue_deref(ch);
+}
+
+static struct xpc_msg_sn2 *
+xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
+{
+ struct xpc_partition *part = &xpc_partitions[ch->partid];
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ unsigned long remote_msg_pa;
+ struct xpc_msg_sn2 *msg;
+ u32 msg_index;
+ u32 nmsgs;
+ u64 msg_offset;
+ enum xp_retval ret;
+
+ if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
+ /* we were interrupted by a signal */
+ return NULL;
+ }
+
+ while (get >= ch_sn2->next_msg_to_pull) {
+
+ /* pull as many messages as are ready and able to be pulled */
+
+ msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
+
+ DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
+ nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
+ if (msg_index + nmsgs > ch->remote_nentries) {
+ /* ignore the ones that wrap the msg queue for now */
+ nmsgs = ch->remote_nentries - msg_index;
+ }
+
+ msg_offset = msg_index * ch->entry_size;
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
+ msg_offset);
+ remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;
+
+ ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
+ nmsgs * ch->entry_size);
+ if (ret != xpSuccess) {
+
+ dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
+ " msg %lld from partition %d, channel=%d, "
+ "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
+ ch->partid, ch->number, ret);
+
+ XPC_DEACTIVATE_PARTITION(part, ret);
+
+ mutex_unlock(&ch_sn2->msg_to_pull_mutex);
+ return NULL;
+ }
+
+ ch_sn2->next_msg_to_pull += nmsgs;
+ }
+
+ mutex_unlock(&ch_sn2->msg_to_pull_mutex);
+
+ /* return the message we were looking for */
+ msg_offset = (get % ch->remote_nentries) * ch->entry_size;
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);
+
+ return msg;
+}
+
+/*
+ * Get the next deliverable message's payload.
+ */
+static void *
+xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg;
+ void *payload = NULL;
+ s64 get;
+
+ do {
+ if (ch->flags & XPC_C_DISCONNECTING)
+ break;
+
+ get = ch_sn2->w_local_GP.get;
+ smp_rmb(); /* guarantee that .get loads before .put */
+ if (get == ch_sn2->w_remote_GP.put)
+ break;
+
+ /* There are messages waiting to be pulled and delivered.
+ * We need to try to secure one for ourselves. We'll do this
+ * by trying to increment w_local_GP.get and hope that no one
+ * else beats us to it. If they do, we'll simply have to try
+ * again for the next one.
+ */
+
+ if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
+ /* we got the entry referenced by get */
+
+ dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, "
+ "partid=%d, channel=%d\n", get + 1,
+ ch->partid, ch->number);
+
+ /* pull the message from the remote partition */
+
+ msg = xpc_pull_remote_msg_sn2(ch, get);
+
+ if (msg != NULL) {
+ DBUG_ON(msg->number != get);
+ DBUG_ON(msg->flags & XPC_M_SN2_DONE);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
+
+ payload = &msg->payload;
+ }
+ break;
+ }
+
+ } while (1);
+
+ return payload;
+}
+
+/*
+ * Send the messages that are ready to be sent by advancing the local
+ * message queue's Put value and then sending a chctl msgrequest to the
+ * recipient partition.
+ */
+static void
+xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg;
+ s64 put = initial_put + 1;
+ int send_msgrequest = 0;
+
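+ /*
+ * Inner loop: advance put past every message now marked
+ * XPC_M_SN2_READY. Outer loop: publish the new Put value with
+ * cmpxchg_rel() and look again, in case more messages became ready
+ * or another CPU raced with us.
+ */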
+ while (1) {
+
+ while (1) {
+ if (put == ch_sn2->w_local_GP.put)
+ break;
+
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
+ local_msgqueue + (put %
+ ch->local_nentries) *
+ ch->entry_size);
+
+ if (!(msg->flags & XPC_M_SN2_READY))
+ break;
+
+ put++;
+ }
+
+ if (put == initial_put) {
+ /* nothing's changed */
+ break;
+ }
+
+ if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
+ initial_put) {
+ /* someone else beat us to it */
+ DBUG_ON(ch_sn2->local_GP->put < initial_put);
+ break;
+ }
+
+ /* we just set the new value of local_GP->put */
+
+ dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, "
+ "channel=%d\n", put, ch->partid, ch->number);
+
+ send_msgrequest = 1;
+
+ /*
+ * Loop back and look again: more messages may have been marked
+ * XPC_M_SN2_READY behind the new local_GP->put since we last
+ * checked, in which case Put needs to be advanced further.
+ */
+ initial_put = put;
+ }
+
+ if (send_msgrequest)
+ xpc_send_chctl_msgrequest_sn2(ch);
+}
+
+/*
+ * Allocate an entry for a message from the message queue associated with the
+ * specified channel.
+ */
+static enum xp_retval
+xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
+ struct xpc_msg_sn2 **address_of_msg)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg;
+ enum xp_retval ret;
+ s64 put;
+
+ /*
+ * Get the next available message entry from the local message queue.
+ * If none are available, we'll make sure that we grab the latest
+ * GP values.
+ */
+ ret = xpTimeout;
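+ /* starting at xpTimeout makes the first failed pass fake a notify IRQ */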
+
+ while (1) {
+
+ put = ch_sn2->w_local_GP.put;
+ smp_rmb(); /* guarantee that .put loads before .get */
+ if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
+
+ /* There are available message entries. We need to try
+ * to secure one for ourselves. We'll do this by trying
+ * to increment w_local_GP.put as long as someone else
+ * doesn't beat us to it. If they do, we'll have to
+ * try again.
+ */
+ if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
+ put) {
+ /* we got the entry referenced by put */
+ break;
+ }
+ continue; /* try again */
+ }
+
+ /*
+ * There aren't any available msg entries at this time.
+ *
+ * In waiting for a message entry to become available,
+ * we set a timeout in case the other side is not sending
+ * completion interrupts. This lets us fake a notify IRQ
+ * that will cause the notify IRQ handler to fetch the latest
+ * GP values as if an interrupt was sent by the other side.
+ */
+ if (ret == xpTimeout)
+ xpc_send_chctl_local_msgrequest_sn2(ch);
+
+ if (flags & XPC_NOWAIT)
+ return xpNoWait;
+
+ ret = xpc_allocate_msg_wait(ch);
+ if (ret != xpInterrupted && ret != xpTimeout)
+ return ret;
+ }
+
+ /* get the message's address and initialize it */
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
+ (put % ch->local_nentries) *
+ ch->entry_size);
+
+ DBUG_ON(msg->flags != 0);
+ msg->number = put;
+
+ dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, "
+ "msg_number=%lld, partid=%d, channel=%d\n", put + 1,
+ (void *)msg, msg->number, ch->partid, ch->number);
+
+ *address_of_msg = msg;
+ return xpSuccess;
+}
+
+/*
+ * Common code that does the actual sending of the message by advancing
+ * the local message queue's Put value and sending a chctl msgrequest to
+ * the partition the message is being sent to.
+ */
+static enum xp_retval
+xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload,
+ u16 payload_size, u8 notify_type, xpc_notify_func func,
+ void *key)
+{
+ enum xp_retval ret = xpSuccess;
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg = NULL; /* quiet "may be used uninitialized" */
+ struct xpc_notify_sn2 *notify = NULL; /* both are assigned before use */
+ s64 msg_number;
+ s64 put;
+
+ DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
+
+ if (XPC_MSG_SIZE(payload_size) > ch->entry_size)
+ return xpPayloadTooBig;
+
+ xpc_msgqueue_ref(ch);
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ ret = ch->reason;
+ goto out_1;
+ }
+ if (!(ch->flags & XPC_C_CONNECTED)) {
+ ret = xpNotConnected;
+ goto out_1;
+ }
+
+ ret = xpc_allocate_msg_sn2(ch, flags, &msg);
+ if (ret != xpSuccess)
+ goto out_1;
+
+ msg_number = msg->number;
+
+ if (notify_type != 0) {
+ /*
+ * Tell the remote side to send an ACK interrupt when the
+ * message has been delivered.
+ */
+ msg->flags |= XPC_M_SN2_INTERRUPT;
+
+ atomic_inc(&ch->n_to_notify);
+
+ notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries];
+ notify->func = func;
+ notify->key = key;
+ notify->type = notify_type;
+
+ /* ??? Is a mb() needed here? */
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ /*
+ * An error occurred between our last error check and
+ * this one. We will try to clear the type field from
+ * the notify entry. If we succeed then
+ * xpc_disconnect_channel() didn't already process
+ * the notify entry.
+ */
+ if (cmpxchg(&notify->type, notify_type, 0) ==
+ notify_type) {
+ atomic_dec(&ch->n_to_notify);
+ ret = ch->reason;
+ }
+ goto out_1;
+ }
+ }
+
+ memcpy(&msg->payload, payload, payload_size);
+
+ msg->flags |= XPC_M_SN2_READY;
+
+ /*
+ * The preceding store of msg->flags must occur before the following
+ * load of local_GP->put.
+ */
+ smp_mb();
+
+ /* see if the message is next in line to be sent, if so send it */
+
+ put = ch_sn2->local_GP->put;
+ if (put == msg_number)
+ xpc_send_msgs_sn2(ch, put);
+
+out_1:
+ xpc_msgqueue_deref(ch);
+ return ret;
+}
+
+/*
+ * Acknowledge the messages that have been delivered and ack'd by
+ * advancing the cached remote message queue's Get value and, if
+ * requested, sending a chctl msgrequest to the message sender's
+ * partition.
+ *
+ * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition
+ * that sent the message.
+ */
+static void
+xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
+{
+ struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+ struct xpc_msg_sn2 *msg;
+ s64 get = initial_get + 1;
+ int send_msgrequest = 0;
+
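+ /*
+ * Same two-level loop as in xpc_send_msgs_sn2(), except that here Get
+ * is advanced past messages marked XPC_M_SN2_DONE.
+ */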
+ while (1) {
+
+ while (1) {
+ if (get == ch_sn2->w_local_GP.get)
+ break;
+
+ msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
+ remote_msgqueue + (get %
+ ch->remote_nentries) *
+ ch->entry_size);
+
+ if (!(msg->flags & XPC_M_SN2_DONE))
+ break;
+
+ msg_flags |= msg->flags;
+ get++;
+ }
+
+ if (get == initial_get) {
+ /* nothing's changed */
+ break;
+ }
+
+ if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
+ initial_get) {
+ /* someone else beat us to it */
+ DBUG_ON(ch_sn2->local_GP->get <= initial_get);
+ break;
+ }
+
+ /* we just set the new value of local_GP->get */
+
+ dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, "
+ "channel=%d\n", get, ch->partid, ch->number);
+
+ send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);
+
+ /*
+ * Loop back and look again: more messages may have been marked
+ * XPC_M_SN2_DONE behind the new local_GP->get since we last
+ * checked, in which case Get needs to be advanced further.
+ */
+ initial_get = get;
+ }
+
+ if (send_msgrequest)
+ xpc_send_chctl_msgrequest_sn2(ch);
+}
+
+static void
+xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
+{
+ struct xpc_msg_sn2 *msg;
+ s64 msg_number;
+ s64 get;
+
+ msg = container_of(payload, struct xpc_msg_sn2, payload);
+ msg_number = msg->number;
+
+ dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n",
+ (void *)msg, msg_number, ch->partid, ch->number);
+
+ DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
+ msg_number % ch->remote_nentries);
+ DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
+ DBUG_ON(msg->flags & XPC_M_SN2_DONE);
+
+ msg->flags |= XPC_M_SN2_DONE;
+
+ /*
+ * The preceding store of msg->flags must occur before the following
+ * load of local_GP->get.
+ */
+ smp_mb();
+
+ /*
+ * See if this message is next in line to be acknowledged as having
+ * been delivered.
+ */
+ get = ch->sn.sn2.local_GP->get;
+ if (get == msg_number)
+ xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
+}
+
+static struct xpc_arch_operations xpc_arch_ops_sn2 = {
+ .setup_partitions = xpc_setup_partitions_sn2,
+ .teardown_partitions = xpc_teardown_partitions_sn2,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
+ .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2,
+ .setup_rsvd_page = xpc_setup_rsvd_page_sn2,
+
+ .allow_hb = xpc_allow_hb_sn2,
+ .disallow_hb = xpc_disallow_hb_sn2,
+ .disallow_all_hbs = xpc_disallow_all_hbs_sn2,
+ .increment_heartbeat = xpc_increment_heartbeat_sn2,
+ .offline_heartbeat = xpc_offline_heartbeat_sn2,
+ .online_heartbeat = xpc_online_heartbeat_sn2,
+ .heartbeat_init = xpc_heartbeat_init_sn2,
+ .heartbeat_exit = xpc_heartbeat_exit_sn2,
+ .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2,
+
+ .request_partition_activation =
+ xpc_request_partition_activation_sn2,
+ .request_partition_reactivation =
+ xpc_request_partition_reactivation_sn2,
+ .request_partition_deactivation =
+ xpc_request_partition_deactivation_sn2,
+ .cancel_partition_deactivation_request =
+ xpc_cancel_partition_deactivation_request_sn2,
+
+ .setup_ch_structures = xpc_setup_ch_structures_sn2,
+ .teardown_ch_structures = xpc_teardown_ch_structures_sn2,
+
+ .make_first_contact = xpc_make_first_contact_sn2,
+
+ .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2,
+ .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2,
+ .send_chctl_closereply = xpc_send_chctl_closereply_sn2,
+ .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2,
+ .send_chctl_openreply = xpc_send_chctl_openreply_sn2,
+ .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2,
+ .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2,
+
+ .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2,
+
+ .setup_msg_structures = xpc_setup_msg_structures_sn2,
+ .teardown_msg_structures = xpc_teardown_msg_structures_sn2,
+
+ .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2,
+ .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2,
+ .partition_engaged = xpc_partition_engaged_sn2,
+ .any_partition_engaged = xpc_any_partition_engaged_sn2,
+ .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2,
+
+ .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2,
+ .send_payload = xpc_send_payload_sn2,
+ .get_deliverable_payload = xpc_get_deliverable_payload_sn2,
+ .received_payload = xpc_received_payload_sn2,
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2,
+};
+
+int
+xpc_init_sn2(void)
+{
+ int ret;
+ size_t buf_size;
+
+ xpc_arch_ops = xpc_arch_ops_sn2;
+
+ if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
+ dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
+ "larger than %d\n", XPC_MSG_HDR_MAX_SIZE);
+ return -E2BIG;
+ }
+
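+ /*
+ * The copy buffer must be able to hold either the remote vars or a
+ * reserved page header plus its nasid mask, whichever is larger.
+ */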
+ buf_size = max(XPC_RP_VARS_SIZE,
+ XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
+ xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
+ GFP_KERNEL,
+ &xpc_remote_copy_buffer_base_sn2);
+ if (xpc_remote_copy_buffer_sn2 == NULL) {
+ dev_err(xpc_part, "can't get memory for remote copy buffer\n");
+ return -ENOMEM;
+ }
+
+ /* open up protections for IPI and [potentially] amo operations */
+ xpc_allow_IPI_ops_sn2();
+ xpc_allow_amo_ops_shub_wars_1_1_sn2();
+
+ /*
+ * This is safe to do before the xpc_hb_checker thread has started
+ * because the handler simply wakes a wait queue. If an interrupt is
+ * received before the thread is waiting, it will not go to sleep,
+ * but rather immediately process the interrupt.
+ */
+ ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
+ "xpc hb", NULL);
+ if (ret != 0) {
+ dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
+ "errno=%d\n", -ret);
+ xpc_disallow_IPI_ops_sn2();
+ kfree(xpc_remote_copy_buffer_base_sn2);
+ }
+ return ret;
+}
+
+void
+xpc_exit_sn2(void)
+{
+ free_irq(SGI_XPC_ACTIVATE, NULL);
+ xpc_disallow_IPI_ops_sn2();
+ kfree(xpc_remote_copy_buffer_base_sn2);
+}
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
new file mode 100644
index 00000000..17bbacb1
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -0,0 +1,1767 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * Cross Partition Communication (XPC) uv-based functions.
+ *
+ * Architecture specific implementation of common functions.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
+#include "../sgi-gru/gru.h"
+#include "../sgi-gru/grukservices.h"
+#include "xpc.h"
+
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
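+/* local copy of the x86 IO-APIC route entry layout, which ia64 lacks */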
+struct uv_IO_APIC_route_entry {
+ __u64 vector : 8,
+ delivery_mode : 3,
+ dest_mode : 1,
+ delivery_status : 1,
+ polarity : 1,
+ __reserved_1 : 1,
+ trigger : 1,
+ mask : 1,
+ __reserved_2 : 15,
+ dest : 32;
+};
+#endif
+
+static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
+
+#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
+ XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
+
+#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
+ XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
+
+static int
+xpc_setup_partitions_uv(void)
+{
+ short partid;
+ struct xpc_partition_uv *part_uv;
+
+ for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+ part_uv = &xpc_partitions[partid].sn.uv;
+
+ mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
+ spin_lock_init(&part_uv->flags_lock);
+ part_uv->remote_act_state = XPC_P_AS_INACTIVE;
+ }
+ return 0;
+}
+
+static void
+xpc_teardown_partitions_uv(void)
+{
+ short partid;
+ struct xpc_partition_uv *part_uv;
+ unsigned long irq_flags;
+
+ for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+ part_uv = &xpc_partitions[partid].sn.uv;
+
+ if (part_uv->cached_activate_gru_mq_desc != NULL) {
+ mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
+ spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
+ spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ kfree(part_uv->cached_activate_gru_mq_desc);
+ part_uv->cached_activate_gru_mq_desc = NULL;
+ mutex_unlock(&part_uv->
+ cached_activate_gru_mq_desc_mutex);
+ }
+ }
+}
+
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+#if defined CONFIG_X86_64
+ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+ UV_AFFINITY_CPU);
+ if (mq->irq < 0) {
+ dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+ -mq->irq);
+ return mq->irq;
+ }
+
+ mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+ mq->irq = SGI_XPC_ACTIVATE;
+ else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+ mq->irq = SGI_XPC_NOTIFY;
+ else
+ return -EINVAL;
+
+ mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+ uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
+#else
+ #error not a supported configuration
+#endif
+
+ return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+ uv_teardown_irq(mq->irq);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode;
+ unsigned long mmr_value;
+
+ mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+ mmr_value = 1UL << 16;
+
+ uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+ #error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+ int ret;
+
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+ ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+ ret);
+ return -EBUSY;
+ }
+#elif defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+ "ret=%d\n", ret);
+ return ret;
+ }
+#else
+ #error not a supported configuration
+#endif
+
+ mq->watchlist_num = ret;
+ return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+ int ret;
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+#if defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
+ BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
+ BUG_ON(ret != SALRET_OK);
+#else
+ #error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
+ irq_handler_t irq_handler)
+{
+ enum xp_retval xp_ret;
+ int ret;
+ int nid;
+ int nasid;
+ int pg_order;
+ struct page *page;
+ struct xpc_gru_mq_uv *mq;
+ struct uv_IO_APIC_route_entry *mmr_value;
+
+ mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+ if (mq == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+ "a xpc_gru_mq_uv structure\n");
+ ret = -ENOMEM;
+ goto out_0;
+ }
+
+ mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
+ GFP_KERNEL);
+ if (mq->gru_mq_desc == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+ "a gru_message_queue_desc structure\n");
+ ret = -ENOMEM;
+ goto out_1;
+ }
+
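+ /*
+ * Round the requested size up to a whole number of pages;
+ * mq->order is kept as log2 of the total size in bytes.
+ */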
+ pg_order = get_order(mq_size);
+ mq->order = pg_order + PAGE_SHIFT;
+ mq_size = 1UL << mq->order;
+
+ mq->mmr_blade = uv_cpu_to_blade_id(cpu);
+
+ nid = cpu_to_node(cpu);
+ page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ pg_order);
+ if (page == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
+ "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
+ ret = -ENOMEM;
+ goto out_2;
+ }
+ mq->address = page_address(page);
+
+ /* enable generation of irq when GRU mq operation occurs to this mq */
+ ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+ if (ret != 0)
+ goto out_3;
+
+ ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+ if (ret != 0)
+ goto out_4;
+
+ ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
+ if (ret != 0) {
+ dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
+ mq->irq, -ret);
+ goto out_5;
+ }
+
+ nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
+ mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
+ ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
+ nasid, mmr_value->vector, mmr_value->dest);
+ if (ret != 0) {
+ dev_err(xpc_part, "gru_create_message_queue() returned "
+ "error=%d\n", ret);
+ ret = -EINVAL;
+ goto out_6;
+ }
+
+ /* allow other partitions to access this GRU mq */
+ xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+ if (xp_ret != xpSuccess) {
+ ret = -EACCES;
+ goto out_6;
+ }
+
+ return mq;
+
+ /* something went wrong */
+out_6:
+ free_irq(mq->irq, NULL);
+out_5:
+ xpc_release_gru_mq_irq_uv(mq);
+out_4:
+ xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+ free_pages((unsigned long)mq->address, pg_order);
+out_2:
+ kfree(mq->gru_mq_desc);
+out_1:
+ kfree(mq);
+out_0:
+ return ERR_PTR(ret);
+}
+
+static void
+xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
+{
+ unsigned int mq_size;
+ int pg_order;
+ int ret;
+
+ /* disallow other partitions from accessing this GRU mq */
+ mq_size = 1UL << mq->order;
+ ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
+ BUG_ON(ret != xpSuccess);
+
+ /* unregister irq handler and release mq irq/vector mapping */
+ free_irq(mq->irq, NULL);
+ xpc_release_gru_mq_irq_uv(mq);
+
+ /* disable generation of irq when GRU mq op occurs to this mq */
+ xpc_gru_mq_watchlist_free_uv(mq);
+
+ pg_order = mq->order - PAGE_SHIFT;
+ free_pages((unsigned long)mq->address, pg_order);
+
+ kfree(mq);
+}
+
+static enum xp_retval
+xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
+ size_t msg_size)
+{
+ enum xp_retval xp_ret;
+ int ret;
+
+ while (1) {
+ ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
+ if (ret == MQE_OK) {
+ xp_ret = xpSuccess;
+ break;
+ }
+
+ if (ret == MQE_QUEUE_FULL) {
+ dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
+ "error=MQE_QUEUE_FULL\n");
+ /* !!! handle QLimit reached; delay & try again */
+ /* ??? Do we add a limit to the number of retries? */
+ (void)msleep_interruptible(10);
+ } else if (ret == MQE_CONGESTION) {
+ dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
+ "error=MQE_CONGESTION\n");
+ /* !!! handle LB Overflow; simply try again */
+ /* ??? Do we add a limit to the number of retries? */
+ } else {
+ /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
+ dev_err(xpc_chan, "gru_send_message_gpa() returned "
+ "error=%d\n", ret);
+ xp_ret = xpGruSendMqError;
+ break;
+ }
+ }
+ return xp_ret;
+}
+
+static void
+xpc_process_activate_IRQ_rcvd_uv(void)
+{
+ unsigned long irq_flags;
+ short partid;
+ struct xpc_partition *part;
+ u8 act_state_req;
+
+ DBUG_ON(xpc_activate_IRQ_rcvd == 0);
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (part->sn.uv.act_state_req == 0)
+ continue;
+
+ xpc_activate_IRQ_rcvd--;
+ BUG_ON(xpc_activate_IRQ_rcvd < 0);
+
+ act_state_req = part->sn.uv.act_state_req;
+ part->sn.uv.act_state_req = 0;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
+ if (part->act_state == XPC_P_AS_INACTIVE)
+ xpc_activate_partition(part);
+ else if (part->act_state == XPC_P_AS_DEACTIVATING)
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
+
+ } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
+ if (part->act_state == XPC_P_AS_INACTIVE)
+ xpc_activate_partition(part);
+ else
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
+
+ } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
+ XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);
+
+ } else {
+ BUG();
+ }
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (xpc_activate_IRQ_rcvd == 0)
+ break;
+ }
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+}
+
+static void
+xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
+ struct xpc_activate_mq_msghdr_uv *msg_hdr,
+ int part_setup,
+ int *wakeup_hb_checker)
+{
+ unsigned long irq_flags;
+ struct xpc_partition_uv *part_uv = &part->sn.uv;
+ struct xpc_openclose_args *args;
+
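+ /*
+ * Note: *wakeup_hb_checker is incremented rather than waking the
+ * wait queue directly, so that the interrupt handler can issue a
+ * single wake_up() after draining the entire message queue.
+ */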
+ part_uv->remote_act_state = msg_hdr->act_state;
+
+ switch (msg_hdr->type) {
+ case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
+ /* syncing of remote_act_state was just done above */
+ break;
+
+ case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
+ struct xpc_activate_mq_msg_activate_req_uv *msg;
+
+ /*
+ * ??? Do we deal here with ts_jiffies being different
+ * ??? if act_state != XPC_P_AS_INACTIVE instead of
+ * ??? below?
+ */
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_activate_req_uv, hdr);
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (part_uv->act_state_req == 0)
+ xpc_activate_IRQ_rcvd++;
+ part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
+ part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
+ part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
+ part_uv->heartbeat_gpa = msg->heartbeat_gpa;
+
+ if (msg->activate_gru_mq_desc_gpa !=
+ part_uv->activate_gru_mq_desc_gpa) {
+ spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
+ spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ part_uv->activate_gru_mq_desc_gpa =
+ msg->activate_gru_mq_desc_gpa;
+ }
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ (*wakeup_hb_checker)++;
+ break;
+ }
+ case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
+ struct xpc_activate_mq_msg_deactivate_req_uv *msg;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_deactivate_req_uv, hdr);
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (part_uv->act_state_req == 0)
+ xpc_activate_IRQ_rcvd++;
+ part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+ part_uv->reason = msg->reason;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ (*wakeup_hb_checker)++;
+ return;
+ }
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
+ struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+
+ if (!part_setup)
+ break;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_closerequest_uv,
+ hdr);
+ args = &part->remote_openclose_args[msg->ch_number];
+ args->reason = msg->reason;
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
+ struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+
+ if (!part_setup)
+ break;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_closereply_uv,
+ hdr);
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
+ struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+
+ if (!part_setup)
+ break;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_openrequest_uv,
+ hdr);
+ args = &part->remote_openclose_args[msg->ch_number];
+ args->entry_size = msg->entry_size;
+ args->local_nentries = msg->local_nentries;
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
+ struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+
+ if (!part_setup)
+ break;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_openreply_uv, hdr);
+ args = &part->remote_openclose_args[msg->ch_number];
+ args->remote_nentries = msg->remote_nentries;
+ args->local_nentries = msg->local_nentries;
+ args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
+ struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+
+ if (!part_setup)
+ break;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
+ case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
+ spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ part_uv->flags |= XPC_P_ENGAGED_UV;
+ spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ break;
+
+ case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
+ spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ part_uv->flags &= ~XPC_P_ENGAGED_UV;
+ spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ break;
+
+ default:
+ dev_err(xpc_part, "received unknown activate_mq msg type=%d "
+ "from partition=%d\n", msg_hdr->type, XPC_PARTID(part));
+
+ /* get hb checker to deactivate from the remote partition */
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (part_uv->act_state_req == 0)
+ xpc_activate_IRQ_rcvd++;
+ part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+ part_uv->reason = xpBadMsgType;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ (*wakeup_hb_checker)++;
+ return;
+ }
+
+ if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
+ part->remote_rp_ts_jiffies != 0) {
+ /*
+ * ??? Does what we do here need to be sensitive to
+ * ??? act_state or remote_act_state?
+ */
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (part_uv->act_state_req == 0)
+ xpc_activate_IRQ_rcvd++;
+ part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ (*wakeup_hb_checker)++;
+ }
+}
+
+static irqreturn_t
+xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
+{
+ struct xpc_activate_mq_msghdr_uv *msg_hdr;
+ short partid;
+ struct xpc_partition *part;
+ int wakeup_hb_checker = 0;
+ int part_referenced;
+
+ while (1) {
+ msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
+ if (msg_hdr == NULL)
+ break;
+
+ partid = msg_hdr->partid;
+ if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
+ dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
+ "received invalid partid=0x%x in message\n",
+ partid);
+ } else {
+ part = &xpc_partitions[partid];
+
+ part_referenced = xpc_part_ref(part);
+ xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+ part_referenced,
+ &wakeup_hb_checker);
+ if (part_referenced)
+ xpc_part_deref(part);
+ }
+
+ gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
+ }
+
+ if (wakeup_hb_checker)
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+
+ return IRQ_HANDLED;
+}
+
+static enum xp_retval
+xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
+ unsigned long gru_mq_desc_gpa)
+{
+ enum xp_retval ret;
+
+ ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
+ sizeof(struct gru_message_queue_desc));
+ if (ret == xpSuccess)
+ gru_mq_desc->mq = NULL;
+
+ return ret;
+}
+
+static enum xp_retval
+xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
+ int msg_type)
+{
+ struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
+ struct xpc_partition_uv *part_uv = &part->sn.uv;
+ struct gru_message_queue_desc *gru_mq_desc;
+ unsigned long irq_flags;
+ enum xp_retval ret;
+
+ DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
+
+ msg_hdr->type = msg_type;
+ msg_hdr->partid = xp_partition_id;
+ msg_hdr->act_state = part->act_state;
+ msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
+
+ mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
+again:
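+ /*
+ * (Re)fetch the remote side's GRU mq descriptor if our cached copy
+ * has been invalidated, e.g. because the remote partition published
+ * a new activate_gru_mq_desc_gpa in its activate request.
+ */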
+ if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
+ gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
+ if (gru_mq_desc == NULL) {
+ gru_mq_desc = kmalloc(sizeof(struct
+ gru_message_queue_desc),
+ GFP_KERNEL);
+ if (gru_mq_desc == NULL) {
+ ret = xpNoMemory;
+ goto done;
+ }
+ part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
+ }
+
+ ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
+ part_uv->
+ activate_gru_mq_desc_gpa);
+ if (ret != xpSuccess)
+ goto done;
+
+ spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
+ spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ }
+
+ /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
+ ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
+ msg_size);
+ if (ret != xpSuccess) {
+ smp_rmb(); /* ensure a fresh copy of part_uv->flags */
+ if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
+ goto again;
+ }
+done:
+ mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
+ return ret;
+}
+
+static void
+xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
+ size_t msg_size, int msg_type)
+{
+ enum xp_retval ret;
+
+ ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
+ if (unlikely(ret != xpSuccess))
+ XPC_DEACTIVATE_PARTITION(part, ret);
+}
+
+static void
+xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
+ void *msg, size_t msg_size, int msg_type)
+{
+ struct xpc_partition *part = &xpc_partitions[ch->partid];
+ enum xp_retval ret;
+
+ ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
+ if (unlikely(ret != xpSuccess)) {
+ if (irq_flags != NULL)
+ spin_unlock_irqrestore(&ch->lock, *irq_flags);
+
+ XPC_DEACTIVATE_PARTITION(part, ret);
+
+ if (irq_flags != NULL)
+ spin_lock_irqsave(&ch->lock, *irq_flags);
+ }
+}
+
+static void
+xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
+{
+ unsigned long irq_flags;
+ struct xpc_partition_uv *part_uv = &part->sn.uv;
+
+ /*
+ * !!! Make our side think that the remote partition sent an activate
+ * !!! mq message our way by doing what the activate IRQ handler would
+ * !!! do had one really been sent.
+ */
+
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (part_uv->act_state_req == 0)
+ xpc_activate_IRQ_rcvd++;
+ part_uv->act_state_req = act_state_req;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+}
+
+static enum xp_retval
+xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
+ size_t *len)
+{
+ s64 status;
+ enum xp_retval ret;
+
+#if defined CONFIG_X86_64
+ status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
+ (u64 *)len);
+ if (status == BIOS_STATUS_SUCCESS)
+ ret = xpSuccess;
+ else if (status == BIOS_STATUS_MORE_PASSES)
+ ret = xpNeedMoreInfo;
+ else
+ ret = xpBiosError;
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
+ if (status == SALRET_OK)
+ ret = xpSuccess;
+ else if (status == SALRET_MORE_PASSES)
+ ret = xpNeedMoreInfo;
+ else
+ ret = xpSalError;
+
+#else
+ #error not a supported configuration
+#endif
+
+ return ret;
+}
+
+static int
+xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
+{
+ xpc_heartbeat_uv =
+ &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
+ rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
+ rp->sn.uv.activate_gru_mq_desc_gpa =
+ uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
+ return 0;
+}
+
+static void
+xpc_allow_hb_uv(short partid)
+{
+}
+
+static void
+xpc_disallow_hb_uv(short partid)
+{
+}
+
+static void
+xpc_disallow_all_hbs_uv(void)
+{
+}
+
+static void
+xpc_increment_heartbeat_uv(void)
+{
+ xpc_heartbeat_uv->value++;
+}
+
+static void
+xpc_offline_heartbeat_uv(void)
+{
+ xpc_increment_heartbeat_uv();
+ xpc_heartbeat_uv->offline = 1;
+}
+
+static void
+xpc_online_heartbeat_uv(void)
+{
+ xpc_increment_heartbeat_uv();
+ xpc_heartbeat_uv->offline = 0;
+}
+
+static void
+xpc_heartbeat_init_uv(void)
+{
+ xpc_heartbeat_uv->value = 1;
+ xpc_heartbeat_uv->offline = 0;
+}
+
+static void
+xpc_heartbeat_exit_uv(void)
+{
+ xpc_offline_heartbeat_uv();
+}
+
+static enum xp_retval
+xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
+{
+ struct xpc_partition_uv *part_uv = &part->sn.uv;
+ enum xp_retval ret;
+
+ ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
+ part_uv->heartbeat_gpa,
+ sizeof(struct xpc_heartbeat_uv));
+ if (ret != xpSuccess)
+ return ret;
+
+ if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
+ !part_uv->cached_heartbeat.offline) {
+
+ ret = xpNoHeartbeat;
+ } else {
+ part->last_heartbeat = part_uv->cached_heartbeat.value;
+ }
+ return ret;
+}
+
+static void
+xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
+ unsigned long remote_rp_gpa, int nasid)
+{
+ short partid = remote_rp->SAL_partid;
+ struct xpc_partition *part = &xpc_partitions[partid];
+ struct xpc_activate_mq_msg_activate_req_uv msg;
+
+ part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
+ part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
+ part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
+ part->sn.uv.activate_gru_mq_desc_gpa =
+ remote_rp->sn.uv.activate_gru_mq_desc_gpa;
+
+ /*
+ * ??? Is it a good idea to make this conditional on what is
+ * ??? potentially stale state information?
+ */
+ if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
+ msg.rp_gpa = uv_gpa(xpc_rsvd_page);
+ msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
+ msg.activate_gru_mq_desc_gpa =
+ xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
+ xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
+ }
+
+ if (part->act_state == XPC_P_AS_INACTIVE)
+ xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
+}
+
+static void
+xpc_request_partition_reactivation_uv(struct xpc_partition *part)
+{
+ xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
+}
+
+static void
+xpc_request_partition_deactivation_uv(struct xpc_partition *part)
+{
+ struct xpc_activate_mq_msg_deactivate_req_uv msg;
+
+ /*
+ * ??? Is it a good idea to make this conditional on what is
+ * ??? potentially stale state information?
+ */
+ if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
+ part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
+
+ msg.reason = part->reason;
+ xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
+ }
+}
+
+static void
+xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
+{
+ /* nothing needs to be done */
+ return;
+}
+
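+/*
+ * A minimal spinlock-protected singly-linked FIFO. Users embed a
+ * struct xpc_fifo_entry_uv inside their own structure and recover the
+ * containing structure with container_of(), as xpc_allocate_msg_slot_uv()
+ * does below for send msg slots.
+ */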
+static void
+xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
+{
+ head->first = NULL;
+ head->last = NULL;
+ spin_lock_init(&head->lock);
+ head->n_entries = 0;
+}
+
+static void *
+xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
+{
+ unsigned long irq_flags;
+ struct xpc_fifo_entry_uv *first;
+
+ spin_lock_irqsave(&head->lock, irq_flags);
+ first = head->first;
+ if (head->first != NULL) {
+ head->first = first->next;
+ if (head->first == NULL)
+ head->last = NULL;
+
+ head->n_entries--;
+ BUG_ON(head->n_entries < 0);
+
+ first->next = NULL;
+ }
+ spin_unlock_irqrestore(&head->lock, irq_flags);
+ return first;
+}
+
+static void
+xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
+ struct xpc_fifo_entry_uv *last)
+{
+ unsigned long irq_flags;
+
+ last->next = NULL;
+ spin_lock_irqsave(&head->lock, irq_flags);
+ if (head->last != NULL)
+ head->last->next = last;
+ else
+ head->first = last;
+ head->last = last;
+ head->n_entries++;
+ spin_unlock_irqrestore(&head->lock, irq_flags);
+}
+
+static int
+xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
+{
+ return head->n_entries;
+}
+
+/*
+ * Set up the channel structures that are uv specific.
+ */
+static enum xp_retval
+xpc_setup_ch_structures_uv(struct xpc_partition *part)
+{
+ struct xpc_channel_uv *ch_uv;
+ int ch_number;
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch_uv = &part->channels[ch_number].sn.uv;
+
+ xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+ xpc_init_fifo_uv(&ch_uv->recv_msg_list);
+ }
+
+ return xpSuccess;
+}
+
+/*
+ * Tear down the channel structures that are uv specific.
+ */
+static void
+xpc_teardown_ch_structures_uv(struct xpc_partition *part)
+{
+ /* nothing needs to be done */
+ return;
+}
+
+static enum xp_retval
+xpc_make_first_contact_uv(struct xpc_partition *part)
+{
+ struct xpc_activate_mq_msg_uv msg;
+
+ /*
+ * We send a sync msg to get the remote partition's remote_act_state
+ * updated to our current act_state, which at this point should be
+ * XPC_P_AS_ACTIVATING.
+ */
+ xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
+
+ while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+ (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
+
+ dev_dbg(xpc_part, "waiting to make first contact with "
+ "partition %d\n", XPC_PARTID(part));
+
+ /* wait a quarter of a second or so */
+ (void)msleep_interruptible(250);
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
+ return part->reason;
+ }
+
+ return xpSuccess;
+}
+
+static u64
+xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ union xpc_channel_ctl_flags chctl;
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ chctl = part->chctl;
+ if (chctl.all_flags != 0)
+ part->chctl.all_flags = 0;
+
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+ return chctl.all_flags;
+}
+
+static enum xp_retval
+xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
+{
+ struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+ struct xpc_send_msg_slot_uv *msg_slot;
+ unsigned long irq_flags;
+ int nentries;
+ int entry;
+ size_t nbytes;
+
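+ /*
+ * If a full-sized allocation fails, retry with progressively fewer
+ * entries; ch->local_nentries is shrunk below to match whatever was
+ * actually allocated.
+ */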
+ for (nentries = ch->local_nentries; nentries > 0; nentries--) {
+ nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
+ ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+ if (ch_uv->send_msg_slots == NULL)
+ continue;
+
+ for (entry = 0; entry < nentries; entry++) {
+ msg_slot = &ch_uv->send_msg_slots[entry];
+
+ msg_slot->msg_slot_number = entry;
+ xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
+ &msg_slot->next);
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (nentries < ch->local_nentries)
+ ch->local_nentries = nentries;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return xpSuccess;
+ }
+
+ return xpNoMemory;
+}
+
+static enum xp_retval
+xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
+{
+ struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+ struct xpc_notify_mq_msg_uv *msg_slot;
+ unsigned long irq_flags;
+ int nentries;
+ int entry;
+ size_t nbytes;
+
+ for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
+ nbytes = nentries * ch->entry_size;
+ ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+ if (ch_uv->recv_msg_slots == NULL)
+ continue;
+
+ for (entry = 0; entry < nentries; entry++) {
+ msg_slot = ch_uv->recv_msg_slots +
+ entry * ch->entry_size;
+
+ msg_slot->hdr.msg_slot_number = entry;
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ if (nentries < ch->remote_nentries)
+ ch->remote_nentries = nentries;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return xpSuccess;
+ }
+
+ return xpNoMemory;
+}
+
+/*
+ * Allocate msg_slots associated with the channel.
+ */
+static enum xp_retval
+xpc_setup_msg_structures_uv(struct xpc_channel *ch)
+{
+ enum xp_retval ret;
+ struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+ DBUG_ON(ch->flags & XPC_C_SETUP);
+
+ ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
+ gru_message_queue_desc),
+ GFP_KERNEL);
+ if (ch_uv->cached_notify_gru_mq_desc == NULL)
+ return xpNoMemory;
+
+ ret = xpc_allocate_send_msg_slot_uv(ch);
+ if (ret == xpSuccess) {
+
+ ret = xpc_allocate_recv_msg_slot_uv(ch);
+ if (ret != xpSuccess) {
+ kfree(ch_uv->send_msg_slots);
+ xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Free up the msg_slots and clear other state that was set up for the
+ * specified channel.
+ */
+static void
+xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
+{
+ struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+ DBUG_ON(!spin_is_locked(&ch->lock));
+
+ kfree(ch_uv->cached_notify_gru_mq_desc);
+ ch_uv->cached_notify_gru_mq_desc = NULL;
+
+ if (ch->flags & XPC_C_SETUP) {
+ xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+ kfree(ch_uv->send_msg_slots);
+ xpc_init_fifo_uv(&ch_uv->recv_msg_list);
+ kfree(ch_uv->recv_msg_slots);
+ }
+}
+
+static void
+xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_closerequest_uv msg;
+
+ msg.ch_number = ch->number;
+ msg.reason = ch->reason;
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
+}
+
+static void
+xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_closereply_uv msg;
+
+ msg.ch_number = ch->number;
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
+}
+
+static void
+xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
+
+ msg.ch_number = ch->number;
+ msg.entry_size = ch->entry_size;
+ msg.local_nentries = ch->local_nentries;
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
+}
+
+static void
+xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_openreply_uv msg;
+
+ msg.ch_number = ch->number;
+ msg.local_nentries = ch->local_nentries;
+ msg.remote_nentries = ch->remote_nentries;
+ msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
+}
+
+static void
+xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;
+
+ msg.ch_number = ch->number;
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
+}
+
+static void
+xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+}
+
+static enum xp_retval
+xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
+ unsigned long gru_mq_desc_gpa)
+{
+ struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+ DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
+ return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
+ gru_mq_desc_gpa);
+}
+
+static void
+xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
+{
+ struct xpc_activate_mq_msg_uv msg;
+
+ xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
+}
+
+static void
+xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
+{
+ struct xpc_activate_mq_msg_uv msg;
+
+ xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
+}
+
+static void
+xpc_assume_partition_disengaged_uv(short partid)
+{
+ struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ part_uv->flags &= ~XPC_P_ENGAGED_UV;
+ spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+}
+
+static int
+xpc_partition_engaged_uv(short partid)
+{
+ return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
+}
+
+static int
+xpc_any_partition_engaged_uv(void)
+{
+ struct xpc_partition_uv *part_uv;
+ short partid;
+
+ for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+ part_uv = &xpc_partitions[partid].sn.uv;
+ if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
+ return 1;
+ }
+ return 0;
+}
+
+static enum xp_retval
+xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
+ struct xpc_send_msg_slot_uv **address_of_msg_slot)
+{
+ enum xp_retval ret;
+ struct xpc_send_msg_slot_uv *msg_slot;
+ struct xpc_fifo_entry_uv *entry;
+
+ while (1) {
+ entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
+ if (entry != NULL)
+ break;
+
+ if (flags & XPC_NOWAIT)
+ return xpNoWait;
+
+ ret = xpc_allocate_msg_wait(ch);
+ if (ret != xpInterrupted && ret != xpTimeout)
+ return ret;
+ }
+
+ msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
+ *address_of_msg_slot = msg_slot;
+ return xpSuccess;
+}
+
+static void
+xpc_free_msg_slot_uv(struct xpc_channel *ch,
+ struct xpc_send_msg_slot_uv *msg_slot)
+{
+ xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);
+
+ /* wakeup anyone waiting for a free msg slot */
+ if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
+ wake_up(&ch->msg_allocate_wq);
+}
+
+static void
+xpc_notify_sender_uv(struct xpc_channel *ch,
+ struct xpc_send_msg_slot_uv *msg_slot,
+ enum xp_retval reason)
+{
+ xpc_notify_func func = msg_slot->func;
+
+ if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {
+
+ atomic_dec(&ch->n_to_notify);
+
+ dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
+ "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
+ msg_slot->msg_slot_number, ch->partid, ch->number);
+
+ func(reason, ch->partid, ch->number, msg_slot->key);
+
+ dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
+ "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
+ msg_slot->msg_slot_number, ch->partid, ch->number);
+ }
+}
+
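+/*
+ * Slot numbers grow monotonically: each time a slot is ACKed and
+ * recycled, its number is advanced by local_nentries, so
+ * (msg_slot_number % local_nentries) always indexes the same physical
+ * slot, and the BUG_ON below catches a duplicate or out-of-order ACK.
+ */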
+static void
+xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
+ struct xpc_notify_mq_msg_uv *msg)
+{
+ struct xpc_send_msg_slot_uv *msg_slot;
+ int entry = msg->hdr.msg_slot_number % ch->local_nentries;
+
+ msg_slot = &ch->sn.uv.send_msg_slots[entry];
+
+ BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
+ msg_slot->msg_slot_number += ch->local_nentries;
+
+ if (msg_slot->func != NULL)
+ xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);
+
+ xpc_free_msg_slot_uv(ch, msg_slot);
+}
+
+static void
+xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
+ struct xpc_notify_mq_msg_uv *msg)
+{
+ struct xpc_partition_uv *part_uv = &part->sn.uv;
+ struct xpc_channel *ch;
+ struct xpc_channel_uv *ch_uv;
+ struct xpc_notify_mq_msg_uv *msg_slot;
+ unsigned long irq_flags;
+ int ch_number = msg->hdr.ch_number;
+
+ if (unlikely(ch_number >= part->nchannels)) {
+ dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
+ "channel number=0x%x in message from partid=%d\n",
+ ch_number, XPC_PARTID(part));
+
+ /* get hb checker to deactivate from the remote partition */
+ spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+ if (part_uv->act_state_req == 0)
+ xpc_activate_IRQ_rcvd++;
+ part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+ part_uv->reason = xpBadChannelNumber;
+ spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+ return;
+ }
+
+ ch = &part->channels[ch_number];
+ xpc_msgqueue_ref(ch);
+
+ if (!(ch->flags & XPC_C_CONNECTED)) {
+ xpc_msgqueue_deref(ch);
+ return;
+ }
+
+ /* see if we're really dealing with an ACK for a previously sent msg */
+ if (msg->hdr.size == 0) {
+ xpc_handle_notify_mq_ack_uv(ch, msg);
+ xpc_msgqueue_deref(ch);
+ return;
+ }
+
+ /* we're dealing with a normal message sent via the notify_mq */
+ ch_uv = &ch->sn.uv;
+
+ msg_slot = ch_uv->recv_msg_slots +
+ (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
+
+ BUG_ON(msg_slot->hdr.size != 0);
+
+ memcpy(msg_slot, msg, msg->hdr.size);
+
+ xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);
+
+ if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
+ /*
+ * If there is an existing idle kthread get it to deliver
+ * the payload, otherwise we'll have to get the channel mgr
+ * for this partition to create a kthread to do the delivery.
+ */
+ if (atomic_read(&ch->kthreads_idle) > 0)
+ wake_up_nr(&ch->idle_wq, 1);
+ else
+ xpc_send_chctl_local_msgrequest_uv(part, ch->number);
+ }
+ xpc_msgqueue_deref(ch);
+}
+
+static irqreturn_t
+xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
+{
+ struct xpc_notify_mq_msg_uv *msg;
+ short partid;
+ struct xpc_partition *part;
+
+ while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
+ NULL) {
+
+ partid = msg->hdr.partid;
+ if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
+ dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
+ "invalid partid=0x%x in message\n", partid);
+ } else {
+ part = &xpc_partitions[partid];
+
+ if (xpc_part_ref(part)) {
+ xpc_handle_notify_mq_msg_uv(part, msg);
+ xpc_part_deref(part);
+ }
+ }
+
+ gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int
+xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
+{
+ return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
+}
+
+static void
+xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
+{
+ struct xpc_channel *ch = &part->channels[ch_number];
+ int ndeliverable_payloads;
+
+ xpc_msgqueue_ref(ch);
+
+ ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);
+
+ if (ndeliverable_payloads > 0 &&
+ (ch->flags & XPC_C_CONNECTED) &&
+ (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {
+
+ xpc_activate_kthreads(ch, ndeliverable_payloads);
+ }
+
+ xpc_msgqueue_deref(ch);
+}
+
+static enum xp_retval
+xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
+ u16 payload_size, u8 notify_type, xpc_notify_func func,
+ void *key)
+{
+ enum xp_retval ret = xpSuccess;
+ struct xpc_send_msg_slot_uv *msg_slot = NULL;
+ struct xpc_notify_mq_msg_uv *msg;
+ u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
+ size_t msg_size;
+
+ DBUG_ON(notify_type != XPC_N_CALL);
+
+ msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
+ if (msg_size > ch->entry_size)
+ return xpPayloadTooBig;
+
+ xpc_msgqueue_ref(ch);
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ ret = ch->reason;
+ goto out_1;
+ }
+ if (!(ch->flags & XPC_C_CONNECTED)) {
+ ret = xpNotConnected;
+ goto out_1;
+ }
+
+ ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
+ if (ret != xpSuccess)
+ goto out_1;
+
+ if (func != NULL) {
+ atomic_inc(&ch->n_to_notify);
+
+ msg_slot->key = key;
+ smp_wmb(); /* a non-NULL func must hit memory after the key */
+ msg_slot->func = func;
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ ret = ch->reason;
+ goto out_2;
+ }
+ }
+
+ msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
+ msg->hdr.partid = xp_partition_id;
+ msg->hdr.ch_number = ch->number;
+ msg->hdr.size = msg_size;
+ msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
+ memcpy(&msg->payload, payload, payload_size);
+
+ ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
+ msg_size);
+ if (ret == xpSuccess)
+ goto out_1;
+
+ XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+out_2:
+ if (func != NULL) {
+ /*
+ * Try to NULL the msg_slot's func field. If we fail, then
+ * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
+ * case we pretend the send succeeded: the user will already get a
+ * callout for the disconnect error from
+ * xpc_notify_senders_of_disconnect_uv(), and returning an error
+ * here as well would only confuse them. Additionally, since the
+ * channel is being disconnected, we don't need to put the msg_slot
+ * back on the free list.
+ */
+ if (cmpxchg(&msg_slot->func, func, NULL) != func) {
+ ret = xpSuccess;
+ goto out_1;
+ }
+
+ msg_slot->key = NULL;
+ atomic_dec(&ch->n_to_notify);
+ }
+ xpc_free_msg_slot_uv(ch, msg_slot);
+out_1:
+ xpc_msgqueue_deref(ch);
+ return ret;
+}
+
+/*
+ * Tell the callers of xpc_send_notify() that the status of their payloads
+ * is unknown because the channel is now disconnecting.
+ *
+ * We don't worry about putting these msg_slots on the free list since the
+ * msg_slots themselves are about to be kfree'd.
+ */
+static void
+xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
+{
+ struct xpc_send_msg_slot_uv *msg_slot;
+ int entry;
+
+ DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
+
+ for (entry = 0; entry < ch->local_nentries; entry++) {
+
+ if (atomic_read(&ch->n_to_notify) == 0)
+ break;
+
+ msg_slot = &ch->sn.uv.send_msg_slots[entry];
+ if (msg_slot->func != NULL)
+ xpc_notify_sender_uv(ch, msg_slot, ch->reason);
+ }
+}
+
+/*
+ * Get the next deliverable message's payload.
+ */
+static void *
+xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
+{
+ struct xpc_fifo_entry_uv *entry;
+ struct xpc_notify_mq_msg_uv *msg;
+ void *payload = NULL;
+
+ if (!(ch->flags & XPC_C_DISCONNECTING)) {
+ entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
+ if (entry != NULL) {
+ msg = container_of(entry, struct xpc_notify_mq_msg_uv,
+ hdr.u.next);
+ payload = &msg->payload;
+ }
+ }
+ return payload;
+}
+
+static void
+xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
+{
+ struct xpc_notify_mq_msg_uv *msg;
+ enum xp_retval ret;
+
+ msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);
+
+ /* return an ACK to the sender of this message */
+
+ msg->hdr.partid = xp_partition_id;
+ msg->hdr.size = 0; /* size of zero indicates this is an ACK */
+
+ ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
+ sizeof(struct xpc_notify_mq_msghdr_uv));
+ if (ret != xpSuccess)
+ XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+}
+
+static struct xpc_arch_operations xpc_arch_ops_uv = {
+ .setup_partitions = xpc_setup_partitions_uv,
+ .teardown_partitions = xpc_teardown_partitions_uv,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
+ .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
+ .setup_rsvd_page = xpc_setup_rsvd_page_uv,
+
+ .allow_hb = xpc_allow_hb_uv,
+ .disallow_hb = xpc_disallow_hb_uv,
+ .disallow_all_hbs = xpc_disallow_all_hbs_uv,
+ .increment_heartbeat = xpc_increment_heartbeat_uv,
+ .offline_heartbeat = xpc_offline_heartbeat_uv,
+ .online_heartbeat = xpc_online_heartbeat_uv,
+ .heartbeat_init = xpc_heartbeat_init_uv,
+ .heartbeat_exit = xpc_heartbeat_exit_uv,
+ .get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
+
+ .request_partition_activation =
+ xpc_request_partition_activation_uv,
+ .request_partition_reactivation =
+ xpc_request_partition_reactivation_uv,
+ .request_partition_deactivation =
+ xpc_request_partition_deactivation_uv,
+ .cancel_partition_deactivation_request =
+ xpc_cancel_partition_deactivation_request_uv,
+
+ .setup_ch_structures = xpc_setup_ch_structures_uv,
+ .teardown_ch_structures = xpc_teardown_ch_structures_uv,
+
+ .make_first_contact = xpc_make_first_contact_uv,
+
+ .get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
+ .send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
+ .send_chctl_closereply = xpc_send_chctl_closereply_uv,
+ .send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
+ .send_chctl_openreply = xpc_send_chctl_openreply_uv,
+ .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
+ .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,
+
+ .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,
+
+ .setup_msg_structures = xpc_setup_msg_structures_uv,
+ .teardown_msg_structures = xpc_teardown_msg_structures_uv,
+
+ .indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
+ .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
+ .assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
+ .partition_engaged = xpc_partition_engaged_uv,
+ .any_partition_engaged = xpc_any_partition_engaged_uv,
+
+ .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
+ .send_payload = xpc_send_payload_uv,
+ .get_deliverable_payload = xpc_get_deliverable_payload_uv,
+ .received_payload = xpc_received_payload_uv,
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
+};
+
+int
+xpc_init_uv(void)
+{
+ xpc_arch_ops = xpc_arch_ops_uv;
+
+ if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
+ dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
+ XPC_MSG_HDR_MAX_SIZE);
+ return -E2BIG;
+ }
+
+ xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+ XPC_ACTIVATE_IRQ_NAME,
+ xpc_handle_activate_IRQ_uv);
+ if (IS_ERR(xpc_activate_mq_uv))
+ return PTR_ERR(xpc_activate_mq_uv);
+
+ xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+ XPC_NOTIFY_IRQ_NAME,
+ xpc_handle_notify_IRQ_uv);
+ if (IS_ERR(xpc_notify_mq_uv)) {
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ return PTR_ERR(xpc_notify_mq_uv);
+ }
+
+ return 0;
+}
+
+void
+xpc_exit_uv(void)
+{
+ xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+}
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
new file mode 100644
index 00000000..3fac67a5
--- /dev/null
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -0,0 +1,607 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2009 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * Cross Partition Network Interface (XPNET) support
+ *
+ * XPNET provides a virtual network layered on top of the Cross
+ * Partition communication layer.
+ *
+ * XPNET provides direct point-to-point and broadcast-like support
+ * for an ethernet-like device. The ethernet broadcast medium is
+ * replaced with a point-to-point message structure which passes
+ * pointers to a DMA-capable block that a remote partition should
+ * retrieve and pass to the upper level networking layer.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "xp.h"
+
+/*
+ * The message payload transferred by XPC.
+ *
+ * buf_pa is the physical address where the DMA should pull from.
+ *
+ * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
+ * cacheline boundary. To accomplish this, we record the number of
+ * bytes from the beginning of the first cacheline to the first useful
+ * byte of the skb (leadin_ignore) and the number of bytes from the
+ * last useful byte of the skb to the end of the last cacheline
+ * (tailout_ignore).
+ *
+ * size is the number of bytes to transfer, which includes the skb->len
+ * (the useful bytes of the sender's skb) plus the leadin and tailout
+ * ignore bytes.
+ */
+struct xpnet_message {
+ u16 version; /* Version for this message */
+ u16 embedded_bytes; /* #of bytes embedded in XPC message */
+ u32 magic; /* Special number indicating this is xpnet */
+ unsigned long buf_pa; /* phys address of buffer to retrieve */
+ u32 size; /* #of bytes in buffer */
+ u8 leadin_ignore; /* #of bytes to ignore at the beginning */
+ u8 tailout_ignore; /* #of bytes to ignore at the end */
+ unsigned char data; /* body of small packets */
+};
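+
+/*
+ * Worked example (assuming L1_CACHE_BYTES == 128): if skb->data is at
+ * 0x...1048 and skb_tail_pointer() is at 0x...10f0, the transfer spans
+ * the two cachelines 0x...1000 through 0x...10ff, so size = 0x100,
+ * leadin_ignore = 0x48 and tailout_ignore = 0x10.
+ */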
+
+/*
+ * Determine the size of our message, the cacheline aligned size,
+ * and then the number of messages we will request from XPC.
+ *
+ * XPC expects each message to exist in an individual cacheline.
+ */
+#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE
+#define XPNET_MSG_DATA_MAX \
+ (XPNET_MSG_SIZE - offsetof(struct xpnet_message, data))
+#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE)
+
+#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
+#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
+
+/*
+ * Version number of the XPNET implementation. XPNET can always talk to
+ * versions with the same major #, and never to versions with a different
+ * major #.
+ */
+#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
+#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
+#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
+
+#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
+#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
+#define XPNET_MAGIC 0x88786984 /* "XNET" */
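+
+/* e.g. _XPNET_VERSION(1, 1) == 0x11: major # in the high nibble, minor in the low */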
+
+#define XPNET_VALID_MSG(_m) \
+ ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
+ && (_m->magic == XPNET_MAGIC))
+
+#define XPNET_DEVICE_NAME "xp0"
+
+/*
+ * When messages are queued with xpc_send_notify, a kmalloc'd buffer
+ * of the following type is passed as a notification cookie. When the
+ * notification function is called, we use the cookie to decide
+ * whether all outstanding message sends have completed. The skb can
+ * then be released.
+ */
+struct xpnet_pending_msg {
+ struct sk_buff *skb;
+ atomic_t use_count;
+};
+
+struct net_device *xpnet_device;
+
+/*
+ * When we are notified of other partitions activating, we add them to
+ * our bitmask of partitions to which we broadcast.
+ */
+static unsigned long *xpnet_broadcast_partitions;
+/* protect above */
+static DEFINE_SPINLOCK(xpnet_broadcast_lock);
+
+/*
+ * Since the Block Transfer Engine (BTE) is being used for the transfer
+ * and it relies upon cache-line size transfers, we need to reserve at
+ * least one cache-line for head and tail alignment. The BTE is
+ * limited to 8MB transfers.
+ *
+ * Testing has shown that changing MTU to greater than 64KB has no effect
+ * on TCP as the two sides negotiate a Max Segment Size that is limited
+ * to 64K. Other protocols may use packets greater than this, but for
+ * now, the default is 64KB.
+ */
+#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
+/* 32KB has been determined to be the ideal */
+#define XPNET_DEF_MTU (0x8000UL)
+
+/*
+ * The partid is encapsulated in the MAC address beginning in the following
+ * octet and it consists of two octets.
+ */
+#define XPNET_PARTID_OCTET 2
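+
+/*
+ * For example, a locally administered MAC of 02:xx:01:02:xx:xx addresses
+ * partid 0x0102: octet 2 carries the high byte and octet 3 the low byte,
+ * matching how xpnet_init() builds dev_addr and how
+ * xpnet_dev_hard_start_xmit() extracts dest_partid.
+ */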
+
+/* Define the XPNET debug device structures to be used with dev_dbg() et al */
+
+struct device_driver xpnet_dbg_name = {
+ .name = "xpnet"
+};
+
+struct device xpnet_dbg_subname = {
+ .init_name = "", /* set to "" */
+ .driver = &xpnet_dbg_name
+};
+
+struct device *xpnet = &xpnet_dbg_subname;
+
+/*
+ * Packet was received by XPC and forwarded to us.
+ */
+static void
+xpnet_receive(short partid, int channel, struct xpnet_message *msg)
+{
+ struct sk_buff *skb;
+ void *dst;
+ enum xp_retval ret;
+
+ if (!XPNET_VALID_MSG(msg)) {
+ /*
+ * Packet with a different XPC version. Ignore.
+ */
+ xpc_received(partid, channel, (void *)msg);
+
+ xpnet_device->stats.rx_errors++;
+
+ return;
+ }
+ dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
+ msg->leadin_ignore, msg->tailout_ignore);
+
+ /* reserve an extra cache line */
+ skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
+ if (!skb) {
+ dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
+ msg->size + L1_CACHE_BYTES);
+
+ xpc_received(partid, channel, (void *)msg);
+
+ xpnet_device->stats.rx_errors++;
+
+ return;
+ }
+
+ /*
+ * The allocated skb has some reserved space.
+ * In order to use xp_remote_memcpy(), we need to get the
+ * skb->data pointer moved forward.
+ */
+ skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
+ (L1_CACHE_BYTES - 1)) +
+ msg->leadin_ignore));
+
+ /*
+ * Update the tail pointer to indicate data actually
+ * transferred.
+ */
+ skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
+
+ /*
+ * Move the data over from the other side.
+ */
+ if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
+ (msg->embedded_bytes != 0)) {
+ dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
+ "%lu)\n", skb->data, &msg->data,
+ (size_t)msg->embedded_bytes);
+
+ skb_copy_to_linear_data(skb, &msg->data,
+ (size_t)msg->embedded_bytes);
+ } else {
+ dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
+ dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
+ "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
+ (void *)msg->buf_pa, msg->size);
+
+ ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
+ if (ret != xpSuccess) {
+ /*
+ * !!! Need better way of cleaning skb. Currently skb
+ * !!! appears in_use and we can't just call
+ * !!! dev_kfree_skb.
+ */
+ dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
+ "returned error=0x%x\n", dst,
+ (void *)msg->buf_pa, msg->size, ret);
+
+ xpc_received(partid, channel, (void *)msg);
+
+ xpnet_device->stats.rx_errors++;
+
+ return;
+ }
+ }
+
+ dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+ "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
+ (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
+ skb->len);
+
+ skb->protocol = eth_type_trans(skb, xpnet_device);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ dev_dbg(xpnet, "passing skb to network layer\n"
+ "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+ "skb->end=0x%p skb->len=%d\n",
+ (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->len);
+
+ xpnet_device->stats.rx_packets++;
+ xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
+
+ netif_rx_ni(skb);
+ xpc_received(partid, channel, (void *)msg);
+}
+
+/*
+ * This is the handler which XPC calls during any sort of change in
+ * state or message reception on a connection.
+ */
+static void
+xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
+ void *data, void *key)
+{
+ DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
+ DBUG_ON(channel != XPC_NET_CHANNEL);
+
+ switch (reason) {
+ case xpMsgReceived: /* message received */
+ DBUG_ON(data == NULL);
+
+ xpnet_receive(partid, channel, (struct xpnet_message *)data);
+ break;
+
+ case xpConnected: /* connection completed to a partition */
+ spin_lock_bh(&xpnet_broadcast_lock);
+ __set_bit(partid, xpnet_broadcast_partitions);
+ spin_unlock_bh(&xpnet_broadcast_lock);
+
+ netif_carrier_on(xpnet_device);
+
+ dev_dbg(xpnet, "%s connected to partition %d\n",
+ xpnet_device->name, partid);
+ break;
+
+ default:
+ spin_lock_bh(&xpnet_broadcast_lock);
+ __clear_bit(partid, xpnet_broadcast_partitions);
+ spin_unlock_bh(&xpnet_broadcast_lock);
+
+ if (bitmap_empty(xpnet_broadcast_partitions,
+ xp_max_npartitions)) {
+ netif_carrier_off(xpnet_device);
+ }
+
+ dev_dbg(xpnet, "%s disconnected from partition %d\n",
+ xpnet_device->name, partid);
+ break;
+ }
+}
+
+static int
+xpnet_dev_open(struct net_device *dev)
+{
+ enum xp_retval ret;
+
+ dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
+ "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
+ (unsigned long)XPNET_MSG_SIZE,
+ (unsigned long)XPNET_MSG_NENTRIES,
+ (unsigned long)XPNET_MAX_KTHREADS,
+ (unsigned long)XPNET_MAX_IDLE_KTHREADS);
+
+ ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
+ XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
+ XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
+ if (ret != xpSuccess) {
+ dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
+ "ret=%d\n", dev->name, ret);
+
+ return -ENOMEM;
+ }
+
+ dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
+
+ return 0;
+}
+
+static int
+xpnet_dev_stop(struct net_device *dev)
+{
+ xpc_disconnect(XPC_NET_CHANNEL);
+
+ dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
+
+ return 0;
+}
+
+static int
+xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* 68 comes from min TCP+IP+MAC header */
+ if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
+ dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
+ "between 68 and %ld\n", dev->name, new_mtu,
+ XPNET_MAX_MTU);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
+ return 0;
+}
+
+/*
+ * Notification that the other end has received the message and
+ * DMA'd the skb information. At this point, they are done with
+ * our side. When all recipients are done processing, we
+ * release the skb and then release our pending message structure.
+ */
+static void
+xpnet_send_completed(enum xp_retval reason, short partid, int channel,
+ void *__qm)
+{
+ struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
+
+ DBUG_ON(queued_msg == NULL);
+
+ dev_dbg(xpnet, "message to %d notified with reason %d\n",
+ partid, reason);
+
+ if (atomic_dec_return(&queued_msg->use_count) == 0) {
+ dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
+ (void *)queued_msg->skb->head);
+
+ dev_kfree_skb_any(queued_msg->skb);
+ kfree(queued_msg);
+ }
+}
+
+static void
+xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
+ u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
+{
+ u8 msg_buffer[XPNET_MSG_SIZE];
+ struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer;
+ u16 msg_size = sizeof(struct xpnet_message);
+ enum xp_retval ret;
+
+ msg->embedded_bytes = embedded_bytes;
+ if (unlikely(embedded_bytes != 0)) {
+ msg->version = XPNET_VERSION_EMBED;
+ dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
+ &msg->data, skb->data, (size_t)embedded_bytes);
+ skb_copy_from_linear_data(skb, &msg->data,
+ (size_t)embedded_bytes);
+ msg_size += embedded_bytes - 1;
+ } else {
+ msg->version = XPNET_VERSION;
+ }
+ msg->magic = XPNET_MAGIC;
+ msg->size = end_addr - start_addr;
+ msg->leadin_ignore = (u64)skb->data - start_addr;
+ msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
+ msg->buf_pa = xp_pa((void *)start_addr);
+
+ dev_dbg(xpnet, "sending XPC message to %d:%d\n"
+ "msg->buf_pa=0x%lx, msg->size=%u, "
+ "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
+ dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+ msg->leadin_ignore, msg->tailout_ignore);
+
+ atomic_inc(&queued_msg->use_count);
+
+ ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg,
+ msg_size, xpnet_send_completed, queued_msg);
+ if (unlikely(ret != xpSuccess))
+ atomic_dec(&queued_msg->use_count);
+}
+
+/*
+ * Network layer has formatted a packet (skb) and is ready to place it
+ * "on the wire". Prepare and send an xpnet_message to all partitions
+ * which have connected with us and are targets of this packet.
+ *
+ * MAC-NOTE: For the XPNET driver, the MAC address contains the
+ * destination partid. If the destination partid octets are 0xffff,
+ * this packet is to be broadcast to all connected partitions.
+ */
+static int
+xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xpnet_pending_msg *queued_msg;
+ u64 start_addr, end_addr;
+ short dest_partid;
+ u16 embedded_bytes = 0;
+
+ dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+ "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
+ (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
+ skb->len);
+
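+ /*
+ * 33:33:... is the IPv6 multicast MAC prefix; XPNET does not
+ * support multicast, so silently drop such frames.
+ */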
+ if (skb->data[0] == 0x33) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK; /* nothing needed to be done */
+ }
+
+ /*
+ * The xpnet_pending_msg tracks how many outstanding
+ * xpc_send_notifies are relying on this skb. When none
+ * remain, release the skb.
+ */
+ queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
+ if (queued_msg == NULL) {
+ dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
+ "packet\n", sizeof(struct xpnet_pending_msg));
+
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the beginning of the first cacheline and end of last */
+ start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
+ end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
+
+ /* calculate how many bytes to embed in the XPC message */
+ if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
+ /* skb->data does fit so embed */
+ embedded_bytes = skb->len;
+ }
+
+ /*
+ * Since the send occurs asynchronously, we set the count to one
+ * and begin sending. Any sends that happen to complete before
+ * we are done sending will not free the skb. We will be left
+ * with that task during exit. This also handles the case of
+ * a packet destined for a partition which is no longer up.
+ */
+ atomic_set(&queued_msg->use_count, 1);
+ queued_msg->skb = skb;
+
+ if (skb->data[0] == 0xff) {
+ /* we are being asked to broadcast to all partitions */
+ for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
+ xp_max_npartitions) {
+
+ xpnet_send(skb, queued_msg, start_addr, end_addr,
+ embedded_bytes, dest_partid);
+ }
+ } else {
+ dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
+ dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;
+
+ if (dest_partid >= 0 &&
+ dest_partid < xp_max_npartitions &&
+ test_bit(dest_partid, xpnet_broadcast_partitions) != 0) {
+
+ xpnet_send(skb, queued_msg, start_addr, end_addr,
+ embedded_bytes, dest_partid);
+ }
+ }
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (atomic_dec_return(&queued_msg->use_count) == 0) {
+ dev_kfree_skb(skb);
+ kfree(queued_msg);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Deal with transmit timeouts coming from the network layer.
+ */
+static void
+xpnet_dev_tx_timeout(struct net_device *dev)
+{
+ dev->stats.tx_errors++;
+}
+
+static const struct net_device_ops xpnet_netdev_ops = {
+ .ndo_open = xpnet_dev_open,
+ .ndo_stop = xpnet_dev_stop,
+ .ndo_start_xmit = xpnet_dev_hard_start_xmit,
+ .ndo_change_mtu = xpnet_dev_change_mtu,
+ .ndo_tx_timeout = xpnet_dev_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init
+xpnet_init(void)
+{
+ int result;
+
+ if (!is_shub() && !is_uv())
+ return -ENODEV;
+
+ dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
+
+ xpnet_broadcast_partitions = kzalloc(BITS_TO_LONGS(xp_max_npartitions) *
+ sizeof(long), GFP_KERNEL);
+ if (xpnet_broadcast_partitions == NULL)
+ return -ENOMEM;
+
+ /*
+ * use ether_setup() to init the majority of our device
+ * structure and then override the necessary pieces.
+ */
+ xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
+ if (xpnet_device == NULL) {
+ kfree(xpnet_broadcast_partitions);
+ return -ENOMEM;
+ }
+
+ netif_carrier_off(xpnet_device);
+
+ xpnet_device->netdev_ops = &xpnet_netdev_ops;
+ xpnet_device->mtu = XPNET_DEF_MTU;
+
+ /*
+ * The LSB of the first octet is set in multicast MAC addresses, so it
+ * must be clear here. We also chose a first octet for the MAC that is
+ * unlikely to collide with any vendor's officially issued MAC.
+ */
+ xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+
+ xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
+ xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+
+ /*
+ * ether_setup() sets this to a multicast device. We are
+ * really not supporting multicast at this time.
+ */
+ xpnet_device->flags &= ~IFF_MULTICAST;
+
+ /*
+ * No need to checksum as it is a DMA transfer. The BTE will
+ * report an error if the data is not retrievable and the
+ * packet will be dropped.
+ */
+ xpnet_device->features = NETIF_F_HW_CSUM;
+
+ result = register_netdev(xpnet_device);
+ if (result != 0) {
+ free_netdev(xpnet_device);
+ kfree(xpnet_broadcast_partitions);
+ }
+
+ return result;
+}
+
+module_init(xpnet_init);
+
+static void __exit
+xpnet_exit(void)
+{
+ dev_info(xpnet, "unregistering network device %s\n",
+ xpnet_device->name);
+
+ unregister_netdev(xpnet_device);
+ free_netdev(xpnet_device);
+ kfree(xpnet_broadcast_partitions);
+}
+
+module_exit(xpnet_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
new file mode 100644
index 00000000..123ed98e
--- /dev/null
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -0,0 +1,898 @@
+/*
+ * drivers/misc/spear13xx_pcie_gadget.c
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Pratyush Anand<pratyush.anand@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci_regs.h>
+#include <linux/configfs.h>
+#include <mach/pcie.h>
+#include <mach/misc_regs.h>
+
+#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
+/*
+ * In the current implementation address translation is done using IN0
+ * only, so the IN1 start address and the IN0 end address have been kept
+ * the same.
+ */
+#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
+#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
+#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
+#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
+#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
+/* Keep the default BAR size as 4K */
+/* AORAM is mapped by default */
+#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
+
+#define INT_TYPE_NO_INT 0
+#define INT_TYPE_INTX 1
+#define INT_TYPE_MSI 2
+struct spear_pcie_gadget_config {
+ void __iomem *base;
+ void __iomem *va_app_base;
+ void __iomem *va_dbi_base;
+ char int_type[10];
+ ulong requested_msi;
+ ulong configured_msi;
+ ulong bar0_size;
+ ulong bar0_rw_offset;
+ void __iomem *va_bar0_address;
+};
+
+struct pcie_gadget_target {
+ struct configfs_subsystem subsys;
+ struct spear_pcie_gadget_config config;
+};
+
+struct pcie_gadget_target_attr {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct spear_pcie_gadget_config *config,
+ char *buf);
+ ssize_t (*store)(struct spear_pcie_gadget_config *config,
+ const char *buf,
+ size_t count);
+};
+
+static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
+{
+ /* Enable DBI access */
+ writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_armisc);
+ writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_awmisc);
+}
+
+static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
+{
+ /* disable DBI access */
+ writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_armisc);
+ writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
+ &app_reg->slv_awmisc);
+}
+
+static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
+ int where, int size, u32 *val)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong va_address;
+
+ /* Enable DBI access */
+ enable_dbi_access(app_reg);
+
+ va_address = (ulong)config->va_dbi_base + (where & ~0x3);
+
+ *val = readl(va_address);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ /* Disable DBI access */
+ disable_dbi_access(app_reg);
+}
+
+static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
+ int where, int size, u32 val)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong va_address;
+
+ /* Enable DBI access */
+ enable_dbi_access(app_reg);
+
+ va_address = (ulong)config->va_dbi_base + (where & ~0x3);
+
+ if (size == 4)
+ writel(val, va_address);
+ else if (size == 2)
+ writew(val, va_address + (where & 2));
+ else if (size == 1)
+ writeb(val, va_address + (where & 3));
+
+ /* Disable DBI access */
+ disable_dbi_access(app_reg);
+}
+
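+/*
+ * Bound the capability-list walk so a malformed (cyclic) capability
+ * list cannot loop forever; 48 mirrors the TTL used by the PCI core
+ * for the same purpose.
+ */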
+#define PCI_FIND_CAP_TTL 48
+
+static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
+ u32 pos, int cap, int *ttl)
+{
+ u32 id;
+
+ while ((*ttl)--) {
+ spear_dbi_read_reg(config, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos += PCI_CAP_LIST_NEXT;
+ }
+ return 0;
+}
+
+static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
+ u32 pos, int cap)
+{
+ int ttl = PCI_FIND_CAP_TTL;
+
+ return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
+}
+
+static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
+ u8 hdr_type)
+{
+ u32 status;
+
+ spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ case PCI_HEADER_TYPE_BRIDGE:
+ return PCI_CAPABILITY_LIST;
+ case PCI_HEADER_TYPE_CARDBUS:
+ return PCI_CB_CAPABILITY_LIST;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Tell if a device supports a given PCI capability.
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it. Possible values for @cap:
+ *
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_MSI Message Signalled Interrupts
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_PCIX PCI-X
+ * %PCI_CAP_ID_EXP PCI Express
+ */
+static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
+ int cap)
+{
+ u32 pos;
+ u32 hdr_type;
+
+ spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
+
+ pos = pci_find_own_cap_start(config, hdr_type);
+ if (pos)
+ pos = pci_find_own_next_cap(config, pos, cap);
+
+ return pos;
+}
+
+static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
+{
+ return 0;
+}
+
+/*
+ * configfs interface show/store functions
+ */
+static ssize_t pcie_gadget_show_link(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
+ return sprintf(buf, "UP");
+ else
+ return sprintf(buf, "DOWN");
+}
+
+static ssize_t pcie_gadget_store_link(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ if (sysfs_streq(buf, "UP"))
+ writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
+ &app_reg->app_ctrl_0);
+ else if (sysfs_streq(buf, "DOWN"))
+ writel(readl(&app_reg->app_ctrl_0)
+ & ~(1 << APP_LTSSM_ENABLE_ID),
+ &app_reg->app_ctrl_0);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t pcie_gadget_show_int_type(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ return sprintf(buf, "%s", config->int_type);
+}
+
+static ssize_t pcie_gadget_store_int_type(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ u32 cap, vec, flags;
+ ulong vector;
+
+ if (sysfs_streq(buf, "INTA"))
+ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
+
+ else if (sysfs_streq(buf, "MSI")) {
+ vector = config->requested_msi;
+ vec = 0;
+ while (vector > 1) {
+ vector /= 2;
+ vec++;
+ }
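+ /*
+ * vec is now log2(requested_msi); the MSI Multiple Message
+ * Capable field (PCI_MSI_FLAGS bits 3:1) encodes the supported
+ * vector count as that power of two, hence the "vec << 1" below.
+ */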
+ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
+ cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
+ spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
+ flags &= ~PCI_MSI_FLAGS_QMASK;
+ flags |= vec << 1;
+ spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
+ } else
+ return -EINVAL;
+
+ strcpy(config->int_type, buf);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_no_of_msi(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ u32 cap, vec, flags;
+ ulong vector;
+
+ if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
+ != (1 << CFG_MSI_EN_ID))
+ vector = 0;
+ else {
+ cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
+ spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
+ flags &= ~PCI_MSI_FLAGS_QSIZE;
+ vec = flags >> 4;
+ vector = 1;
+ while (vec--)
+ vector *= 2;
+ }
+ config->configured_msi = vector;
+
+ return sprintf(buf, "%lu", vector);
+}
+
+static ssize_t pcie_gadget_store_no_of_msi(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ if (strict_strtoul(buf, 0, &config->requested_msi))
+ return -EINVAL;
+ if (config->requested_msi > 32)
+ config->requested_msi = 32;
+
+ return count;
+}
+
+static ssize_t pcie_gadget_store_inta(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong en;
+
+ if (strict_strtoul(buf, 0, &en))
+ return -EINVAL;
+
+ if (en)
+ writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
+ &app_reg->app_ctrl_0);
+ else
+ writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
+ &app_reg->app_ctrl_0);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_store_send_msi(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong vector;
+ u32 ven_msi;
+
+ if (strict_strtoul(buf, 0, &vector))
+ return -EINVAL;
+
+ if (!config->configured_msi)
+ return -EINVAL;
+
+ if (vector >= config->configured_msi)
+ return -EINVAL;
+
+ ven_msi = readl(&app_reg->ven_msi_1);
+ ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
+ ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
+ ven_msi &= ~VEN_MSI_TC_MASK;
+ ven_msi |= 0 << VEN_MSI_TC_ID;
+ ven_msi &= ~VEN_MSI_VECTOR_MASK;
+ ven_msi |= vector << VEN_MSI_VECTOR_ID;
+
+ /* generate an interrupt for the msi vector */
+ ven_msi |= VEN_MSI_REQ_EN;
+ writel(ven_msi, &app_reg->ven_msi_1);
+ udelay(1);
+ ven_msi &= ~VEN_MSI_REQ_EN;
+ writel(ven_msi, &app_reg->ven_msi_1);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_vendor_id(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ u32 id;
+
+ spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
+
+ return sprintf(buf, "%x", id);
+}
+
+static ssize_t pcie_gadget_store_vendor_id(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong id;
+
+ if (strict_strtoul(buf, 0, &id))
+ return -EINVAL;
+
+ spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_device_id(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ u32 id;
+
+ spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
+
+ return sprintf(buf, "%x", id);
+}
+
+static ssize_t pcie_gadget_store_device_id(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong id;
+
+ if (strict_strtoul(buf, 0, &id))
+ return -EINVAL;
+
+ spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_size(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ return sprintf(buf, "%lx", config->bar0_size);
+}
+
+static ssize_t pcie_gadget_store_bar0_size(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong size;
+ u32 pos, pos1;
+ u32 no_of_bit = 0;
+
+ if (strict_strtoul(buf, 0, &size))
+ return -EINVAL;
+ /* min bar size is 256 */
+ if (size <= 0x100)
+ size = 0x100;
+ /* max bar size is 1MB */
+ else if (size >= 0x100000)
+ size = 0x100000;
+ else {
+ pos = 0;
+ pos1 = 0;
+ while (pos < 21) {
+ pos = find_next_bit((ulong *)&size, 21, pos);
+ if (pos != 21)
+ pos1 = pos + 1;
+ pos++;
+ no_of_bit++;
+ }
+ if (no_of_bit == 2)
+ pos1--;
+
+ size = 1 << pos1;
+ }
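+ /*
+ * The loop above rounds a non-power-of-two size up to the next
+ * power of two, e.g. 0x5000 becomes 0x8000; an exact power of two
+ * (no_of_bit == 2, counting the final failed search) is kept as-is.
+ */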
+ config->bar0_size = size;
+ spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_address(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+ u32 address = readl(&app_reg->pim0_mem_addr_start);
+
+ return sprintf(buf, "%x", address);
+}
+
+static ssize_t pcie_gadget_store_bar0_address(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+ ulong address;
+
+ if (strict_strtoul(buf, 0, &address))
+ return -EINVAL;
+
+ address &= ~(config->bar0_size - 1);
+ if (config->va_bar0_address)
+ iounmap(config->va_bar0_address);
+ config->va_bar0_address = ioremap(address, config->bar0_size);
+ if (!config->va_bar0_address)
+ return -ENOMEM;
+
+ writel(address, &app_reg->pim0_mem_addr_start);
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_rw_offset(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ return sprintf(buf, "%lx", config->bar0_rw_offset);
+}
+
+static ssize_t pcie_gadget_store_bar0_rw_offset(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong offset;
+
+ if (strict_strtoul(buf, 0, &offset))
+ return -EINVAL;
+
+ if (offset % 4)
+ return -EINVAL;
+
+ config->bar0_rw_offset = offset;
+
+ return count;
+}
+
+static ssize_t pcie_gadget_show_bar0_data(
+ struct spear_pcie_gadget_config *config,
+ char *buf)
+{
+ ulong data;
+
+ if (!config->va_bar0_address)
+ return -ENOMEM;
+
+ data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
+
+ return sprintf(buf, "%lx", data);
+}
+
+static ssize_t pcie_gadget_store_bar0_data(
+ struct spear_pcie_gadget_config *config,
+ const char *buf, size_t count)
+{
+ ulong data;
+
+ if (strict_strtoul(buf, 0, &data))
+ return -EINVAL;
+
+ if (!config->va_bar0_address)
+ return -ENOMEM;
+
+ writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
+
+ return count;
+}
+
+/*
+ * Attribute definitions.
+ */
+
+#define PCIE_GADGET_TARGET_ATTR_RO(_name) \
+static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
+ __CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)
+
+#define PCIE_GADGET_TARGET_ATTR_WO(_name) \
+static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
+ __CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)
+
+#define PCIE_GADGET_TARGET_ATTR_RW(_name) \
+static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
+ __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
+ pcie_gadget_store_##_name)
+PCIE_GADGET_TARGET_ATTR_RW(link);
+PCIE_GADGET_TARGET_ATTR_RW(int_type);
+PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
+PCIE_GADGET_TARGET_ATTR_WO(inta);
+PCIE_GADGET_TARGET_ATTR_WO(send_msi);
+PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
+PCIE_GADGET_TARGET_ATTR_RW(device_id);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
+PCIE_GADGET_TARGET_ATTR_RW(bar0_data);
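+
+/*
+ * For reference, PCIE_GADGET_TARGET_ATTR_RW(link) above expands to:
+ *
+ *	static struct pcie_gadget_target_attr pcie_gadget_target_link =
+ *		__CONFIGFS_ATTR(link, S_IRUGO | S_IWUSR,
+ *				pcie_gadget_show_link,
+ *				pcie_gadget_store_link);
+ */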
+
+static struct configfs_attribute *pcie_gadget_target_attrs[] = {
+ &pcie_gadget_target_link.attr,
+ &pcie_gadget_target_int_type.attr,
+ &pcie_gadget_target_no_of_msi.attr,
+ &pcie_gadget_target_inta.attr,
+ &pcie_gadget_target_send_msi.attr,
+ &pcie_gadget_target_vendor_id.attr,
+ &pcie_gadget_target_device_id.attr,
+ &pcie_gadget_target_bar0_size.attr,
+ &pcie_gadget_target_bar0_address.attr,
+ &pcie_gadget_target_bar0_rw_offset.attr,
+ &pcie_gadget_target_bar0_data.attr,
+ NULL,
+};
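+
+/*
+ * Each attribute above shows up as a file in the gadget's configfs
+ * directory, named "pcie_gadget.<pdev->id>". A typical session
+ * (hypothetical mount point and id):
+ *
+ *	# mount -t configfs none /sys/kernel/config
+ *	# cd /sys/kernel/config/pcie_gadget.0
+ *	# echo 0x1000 > bar0_size
+ *	# cat link
+ */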
+
+static struct pcie_gadget_target *to_target(struct config_item *item)
+{
+ return item ?
+ container_of(to_configfs_subsystem(to_config_group(item)),
+ struct pcie_gadget_target, subsys) : NULL;
+}
+
+/*
+ * Item operations and type for pcie_gadget_target.
+ */
+
+static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = -EINVAL;
+ struct pcie_gadget_target *target = to_target(item);
+ struct pcie_gadget_target_attr *t_attr =
+ container_of(attr, struct pcie_gadget_target_attr, attr);
+
+ if (t_attr->show)
+ ret = t_attr->show(&target->config, buf);
+ return ret;
+}
+
+static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ ssize_t ret = -EINVAL;
+ struct pcie_gadget_target *target = to_target(item);
+ struct pcie_gadget_target_attr *t_attr =
+ container_of(attr, struct pcie_gadget_target_attr, attr);
+
+ if (t_attr->store)
+ ret = t_attr->store(&target->config, buf, count);
+ return ret;
+}
+
+static struct configfs_item_operations pcie_gadget_target_item_ops = {
+ .show_attribute = pcie_gadget_target_attr_show,
+ .store_attribute = pcie_gadget_target_attr_store,
+};
+
+static struct config_item_type pcie_gadget_target_type = {
+ .ct_attrs = pcie_gadget_target_attrs,
+ .ct_item_ops = &pcie_gadget_target_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
+{
+ struct pcie_app_reg __iomem *app_reg = config->va_app_base;
+
+	/* setup registers for outbound translation */
+
+ writel(config->base, &app_reg->in0_mem_addr_start);
+ writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
+ &app_reg->in0_mem_addr_limit);
+ writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
+ writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
+ &app_reg->in1_mem_addr_limit);
+ writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
+ writel(app_reg->in_io_addr_start + IN_IO_SIZE,
+ &app_reg->in_io_addr_limit);
+ writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
+ writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
+ &app_reg->in_cfg0_addr_limit);
+ writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
+ writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
+ &app_reg->in_cfg1_addr_limit);
+ writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
+ writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
+ &app_reg->in_msg_addr_limit);
+
+ writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
+ writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
+ writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
+
+	/* setup registers for inbound translation */
+
+ /* Keep AORAM mapped at BAR0 as default */
+ config->bar0_size = INBOUND_ADDR_MASK + 1;
+ spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
+ spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
+ config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
+ config->bar0_size);
+
+ writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
+ writel(0, &app_reg->pim1_mem_addr_start);
+ writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
+
+ writel(0x0, &app_reg->pim_io_addr_start);
+ writel(0x0, &app_reg->pim_rom_addr_start);
+
+ writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
+ | ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+ /* disable all rx interrupts */
+ writel(0, &app_reg->int_mask);
+
+	/* Select INTA as default */
+ spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
+}
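+
+/*
+ * Note on the layout set up above: the outbound windows (mem0, mem1, io,
+ * cfg0, cfg1, msg) are carved back-to-back out of the PCIe address space
+ * starting at config->base, each sized by its IN*_SIZE define. Inbound,
+ * BAR0 defaults to INBOUND_ADDR_MASK + 1 bytes mapped onto SYSRAM1, with
+ * INTA selected as the default interrupt.
+ */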
+
+static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
+{
+ struct resource *res0, *res1;
+ unsigned int status = 0;
+ int irq;
+ struct clk *clk;
+ static struct pcie_gadget_target *target;
+ struct spear_pcie_gadget_config *config;
+ struct config_item *cg_item;
+ struct configfs_subsystem *subsys;
+
+ /* get resource for application registers*/
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res0) {
+ dev_err(&pdev->dev, "no resource defined\n");
+ return -EBUSY;
+ }
+ if (!request_mem_region(res0->start, resource_size(res0),
+ pdev->name)) {
+ dev_err(&pdev->dev, "pcie gadget region already claimed\n");
+ return -EBUSY;
+ }
+ /* get resource for dbi registers*/
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1) {
+ dev_err(&pdev->dev, "no resource defined\n");
+ goto err_rel_res0;
+ }
+ if (!request_mem_region(res1->start, resource_size(res1),
+ pdev->name)) {
+ dev_err(&pdev->dev, "pcie gadget region already claimed\n");
+ goto err_rel_res0;
+ }
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target) {
+ dev_err(&pdev->dev, "out of memory\n");
+ status = -ENOMEM;
+ goto err_rel_res;
+ }
+
+ cg_item = &target->subsys.su_group.cg_item;
+ sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
+ cg_item->ci_type = &pcie_gadget_target_type;
+ config = &target->config;
+ config->va_app_base = (void __iomem *)ioremap(res0->start,
+ resource_size(res0));
+ if (!config->va_app_base) {
+ dev_err(&pdev->dev, "ioremap fail\n");
+ status = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ config->base = (void __iomem *)res1->start;
+
+ config->va_dbi_base = (void __iomem *)ioremap(res1->start,
+ resource_size(res1));
+ if (!config->va_dbi_base) {
+ dev_err(&pdev->dev, "ioremap fail\n");
+ status = -ENOMEM;
+ goto err_iounmap_app;
+ }
+
+ dev_set_drvdata(&pdev->dev, target);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no update irq?\n");
+ status = irq;
+ goto err_iounmap;
+ }
+
+ status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
+ if (status) {
+ dev_err(&pdev->dev,
+ "pcie gadget interrupt IRQ%d already claimed\n", irq);
+ goto err_iounmap;
+ }
+
+ /* Register configfs hooks */
+ subsys = &target->subsys;
+ config_group_init(&subsys->su_group);
+ mutex_init(&subsys->su_mutex);
+ status = configfs_register_subsystem(subsys);
+ if (status)
+ goto err_irq;
+
+ /*
+	 * init basic pcie application registers.
+	 * Do not enable the clock if it is PCIE0. Ideally, all controllers
+	 * should be independent of each other with respect to clock, but
+	 * PCIE1 and PCIE2 depend on PCIE0, so the PCIE0 clock is provided
+	 * during board init.
+ */
+ if (pdev->id == 1) {
+ /*
+		 * Ideally the CFG clock should also have been enabled here,
+		 * but currently it is done during the board init routine.
+ */
+ clk = clk_get_sys("pcie1", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s:couldn't get clk for pcie1\n", __func__);
+ goto err_irq;
+ }
+ if (clk_enable(clk)) {
+ pr_err("%s:couldn't enable clk for pcie1\n", __func__);
+ goto err_irq;
+ }
+ } else if (pdev->id == 2) {
+ /*
+		 * Ideally the CFG clock should also have been enabled here,
+		 * but currently it is done during the board init routine.
+ */
+ clk = clk_get_sys("pcie2", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s:couldn't get clk for pcie2\n", __func__);
+ goto err_irq;
+ }
+ if (clk_enable(clk)) {
+ pr_err("%s:couldn't enable clk for pcie2\n", __func__);
+ goto err_irq;
+ }
+ }
+ spear13xx_pcie_device_init(config);
+
+ return 0;
+err_irq:
+ free_irq(irq, NULL);
+err_iounmap:
+ iounmap(config->va_dbi_base);
+err_iounmap_app:
+ iounmap(config->va_app_base);
+err_kzalloc:
+ kfree(target);
+err_rel_res:
+ release_mem_region(res1->start, resource_size(res1));
+err_rel_res0:
+ release_mem_region(res0->start, resource_size(res0));
+ return status;
+}
+
+static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
+{
+ struct resource *res0, *res1;
+ static struct pcie_gadget_target *target;
+ struct spear_pcie_gadget_config *config;
+ int irq;
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ target = dev_get_drvdata(&pdev->dev);
+ config = &target->config;
+
+ free_irq(irq, NULL);
+ iounmap(config->va_dbi_base);
+ iounmap(config->va_app_base);
+ release_mem_region(res1->start, resource_size(res1));
+ release_mem_region(res0->start, resource_size(res0));
+ configfs_unregister_subsystem(&target->subsys);
+ kfree(target);
+
+ return 0;
+}
+
+static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_driver spear_pcie_gadget_driver = {
+ .probe = spear_pcie_gadget_probe,
+ .remove = spear_pcie_gadget_remove,
+ .shutdown = spear_pcie_gadget_shutdown,
+ .driver = {
+ .name = "pcie-gadget-spear",
+ .bus = &platform_bus_type
+ },
+};
+
+module_platform_driver(spear_pcie_gadget_driver);
+
+MODULE_ALIAS("platform:pcie-gadget-spear");
+MODULE_AUTHOR("Pratyush Anand");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
new file mode 100644
index 00000000..abb5de1a
--- /dev/null
+++ b/drivers/misc/ti-st/Kconfig
@@ -0,0 +1,17 @@
+#
+# TI's shared transport line discipline and the protocol
+# drivers (BT, FM and GPS)
+#
+menu "Texas Instruments shared transport line discipline"
+config TI_ST
+ tristate "Shared transport core driver"
+ depends on NET && GPIOLIB
+ select FW_LOADER
+ help
+ This enables the shared transport core driver for TI
+	  BT / FM and GPS combo chips. It allows protocol drivers
+	  to register themselves with the core and send data; the responses
+	  are returned to the relevant protocol drivers based on their
+	  packet types.
+
+endmenu
diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile
new file mode 100644
index 00000000..78d7ebb1
--- /dev/null
+++ b/drivers/misc/ti-st/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for TI's shared transport line discipline
+# and its protocol drivers (BT, FM, GPS)
+#
+obj-$(CONFIG_TI_ST) += st_drv.o
+st_drv-objs := st_core.o st_kim.o st_ll.o
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
new file mode 100644
index 00000000..2b62232c
--- /dev/null
+++ b/drivers/misc/ti-st/st_core.c
@@ -0,0 +1,890 @@
+/*
+ * Shared Transport Line discipline driver Core
+ * This hooks up ST KIM driver and ST LL driver
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define pr_fmt(fmt) "(stc): " fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+
+#include <linux/ti_wilink_st.h>
+
+/* function pointer pointing to either
+ * st_kim_recv, during registration, to receive fw download responses, or
+ * st_int_recv, after registration, to receive proto stack responses
+ */
+void (*st_recv) (void*, const unsigned char*, long);
+
+/********************************************************************/
+static void add_channel_to_table(struct st_data_s *st_gdata,
+ struct st_proto_s *new_proto)
+{
+ pr_info("%s: id %d\n", __func__, new_proto->chnl_id);
+ /* list now has the channel id as index itself */
+ st_gdata->list[new_proto->chnl_id] = new_proto;
+ st_gdata->is_registered[new_proto->chnl_id] = true;
+}
+
+static void remove_channel_from_table(struct st_data_s *st_gdata,
+ struct st_proto_s *proto)
+{
+ pr_info("%s: id %d\n", __func__, proto->chnl_id);
+/* st_gdata->list[proto->chnl_id] = NULL; */
+ st_gdata->is_registered[proto->chnl_id] = false;
+}
+
+/*
+ * called from KIM during firmware download.
+ *
+ * This is a wrapper function to tty->ops->write_room.
+ * It returns the amount of free space available in the
+ * uart tx buffer.
+ */
+int st_get_uart_wr_room(struct st_data_s *st_gdata)
+{
+ struct tty_struct *tty;
+ if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
+ pr_err("tty unavailable to perform write");
+ return -1;
+ }
+ tty = st_gdata->tty;
+ return tty->ops->write_room(tty);
+}
+
+/* can be called from
+ * -- KIM (during fw download)
+ * -- ST Core (during st_write)
+ *
+ * This is the internal write function - a wrapper
+ * to tty->ops->write
+ */
+int st_int_write(struct st_data_s *st_gdata,
+ const unsigned char *data, int count)
+{
+ struct tty_struct *tty;
+ if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
+ pr_err("tty unavailable to perform write");
+ return -EINVAL;
+ }
+ tty = st_gdata->tty;
+#ifdef VERBOSE
+ print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE,
+ 16, 1, data, count, 0);
+#endif
+ return tty->ops->write(tty, data, count);
+
+}
+
+/*
+ * push the skb received to relevant
+ * protocol stacks
+ */
+void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
+{
+ pr_debug(" %s(prot:%d) ", __func__, chnl_id);
+
+ if (unlikely
+ (st_gdata == NULL || st_gdata->rx_skb == NULL
+ || st_gdata->is_registered[chnl_id] == false)) {
+ pr_err("chnl_id %d not registered, no data to send?",
+ chnl_id);
+ kfree_skb(st_gdata->rx_skb);
+ return;
+ }
+ /* this cannot fail
+ * this shouldn't take long
+ * - should be just skb_queue_tail for the
+ * protocol stack driver
+ */
+ if (likely(st_gdata->list[chnl_id]->recv != NULL)) {
+ if (unlikely
+ (st_gdata->list[chnl_id]->recv
+ (st_gdata->list[chnl_id]->priv_data, st_gdata->rx_skb)
+ != 0)) {
+ pr_err(" proto stack %d's ->recv failed", chnl_id);
+ kfree_skb(st_gdata->rx_skb);
+ return;
+ }
+ } else {
+ pr_err(" proto stack %d's ->recv null", chnl_id);
+ kfree_skb(st_gdata->rx_skb);
+ }
+ return;
+}
+
+/**
+ * st_reg_complete -
+ * to call registration complete callbacks
+ * of all protocol stack drivers
+ * This function is being called with spin lock held, protocol drivers are
+ * only expected to complete their waits and do nothing more than that.
+ */
+void st_reg_complete(struct st_data_s *st_gdata, char err)
+{
+ unsigned char i = 0;
+ pr_info(" %s ", __func__);
+ for (i = 0; i < ST_MAX_CHANNELS; i++) {
+ if (likely(st_gdata != NULL &&
+ st_gdata->is_registered[i] == true &&
+ st_gdata->list[i]->reg_complete_cb != NULL)) {
+ st_gdata->list[i]->reg_complete_cb
+ (st_gdata->list[i]->priv_data, err);
+ pr_info("protocol %d's cb sent %d\n", i, err);
+ if (err) { /* cleanup registered protocol */
+ st_gdata->protos_registered--;
+ st_gdata->is_registered[i] = false;
+ }
+ }
+ }
+}
+
+static inline int st_check_data_len(struct st_data_s *st_gdata,
+ unsigned char chnl_id, int len)
+{
+ int room = skb_tailroom(st_gdata->rx_skb);
+
+ pr_debug("len %d room %d", len, room);
+
+ if (!len) {
+ /* Received packet has only packet header and
+ * has zero length payload. So, ask ST CORE to
+ * forward the packet to protocol driver (BT/FM/GPS)
+ */
+ st_send_frame(chnl_id, st_gdata);
+
+ } else if (len > room) {
+ /* Received packet's payload length is larger.
+ * We can't accommodate it in created skb.
+ */
+ pr_err("Data length is too large len %d room %d", len,
+ room);
+ kfree_skb(st_gdata->rx_skb);
+ } else {
+ /* Packet header has non-zero payload length and
+		 * we have enough space in created skb. Let's read
+ * payload data */
+ st_gdata->rx_state = ST_W4_DATA;
+ st_gdata->rx_count = len;
+ return len;
+ }
+
+ /* Change ST state to continue to process next
+ * packet */
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_skb = NULL;
+ st_gdata->rx_count = 0;
+ st_gdata->rx_chnl = 0;
+
+ return 0;
+}
+
+/**
+ * st_wakeup_ack - internal function for action when wake-up ack
+ * received
+ */
+static inline void st_wakeup_ack(struct st_data_s *st_gdata,
+ unsigned char cmd)
+{
+ struct sk_buff *waiting_skb;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ /* de-Q from waitQ and Q in txQ now that the
+ * chip is awake
+ */
+ while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
+ skb_queue_tail(&st_gdata->txq, waiting_skb);
+
+ /* state forwarded to ST LL */
+ st_ll_sleep_state(st_gdata, (unsigned long)cmd);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ /* wake up to send the recently copied skbs from waitQ */
+ st_tx_wakeup(st_gdata);
+}
+
+/**
+ * st_int_recv - ST's internal receive function.
+ * Decodes received RAW data and forwards to corresponding
+ * client drivers (Bluetooth, FM, GPS, etc.).
+ * This can receive various types of packets:
+ * HCI events, ACL, SCO, the 4 types of HCI-LL PM packets,
+ * CH-8 packets from FM and CH-9 packets from GPS cores.
+ */
+void st_int_recv(void *disc_data,
+ const unsigned char *data, long count)
+{
+ char *ptr;
+ struct st_proto_s *proto;
+ unsigned short payload_len = 0;
+ int len = 0, type = 0;
+ unsigned char *plen;
+ struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
+ unsigned long flags;
+
+ ptr = (char *)data;
+ /* tty_receive sent null ? */
+ if (unlikely(ptr == NULL) || (st_gdata == NULL)) {
+ pr_err(" received null from TTY ");
+ return;
+ }
+
+ pr_debug("count %ld rx_state %ld"
+ "rx_count %ld", count, st_gdata->rx_state,
+ st_gdata->rx_count);
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ /* Decode received bytes here */
+ while (count) {
+ if (st_gdata->rx_count) {
+ len = min_t(unsigned int, st_gdata->rx_count, count);
+ memcpy(skb_put(st_gdata->rx_skb, len), ptr, len);
+ st_gdata->rx_count -= len;
+ count -= len;
+ ptr += len;
+
+ if (st_gdata->rx_count)
+ continue;
+
+ /* Check ST RX state machine , where are we? */
+ switch (st_gdata->rx_state) {
+ /* Waiting for complete packet ? */
+ case ST_W4_DATA:
+ pr_debug("Complete pkt received");
+ /* Ask ST CORE to forward
+ * the packet to protocol driver */
+ st_send_frame(st_gdata->rx_chnl, st_gdata);
+
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_skb = NULL;
+ continue;
+ /* parse the header to know details */
+ case ST_W4_HEADER:
+ proto = st_gdata->list[st_gdata->rx_chnl];
+ plen =
+ &st_gdata->rx_skb->data
+ [proto->offset_len_in_hdr];
+ pr_debug("plen pointing to %x\n", *plen);
+ if (proto->len_size == 1)/* 1 byte len field */
+ payload_len = *(unsigned char *)plen;
+ else if (proto->len_size == 2)
+ payload_len =
+ __le16_to_cpu(*(unsigned short *)plen);
+ else
+ pr_info("%s: invalid length "
+ "for id %d\n",
+ __func__, proto->chnl_id);
+ st_check_data_len(st_gdata, proto->chnl_id,
+ payload_len);
+ pr_debug("off %d, pay len %d\n",
+ proto->offset_len_in_hdr, payload_len);
+ continue;
+ } /* end of switch rx_state */
+ }
+
+ /* end of if rx_count */
+ /* Check first byte of packet and identify module
+ * owner (BT/FM/GPS) */
+ switch (*ptr) {
+ case LL_SLEEP_IND:
+ case LL_SLEEP_ACK:
+ case LL_WAKE_UP_IND:
+ pr_debug("PM packet");
+ /* this takes appropriate action based on
+ * sleep state received --
+ */
+ st_ll_sleep_state(st_gdata, *ptr);
+ /* if WAKEUP_IND collides copy from waitq to txq
+ * and assume chip awake
+ */
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ if (st_ll_getstate(st_gdata) == ST_LL_AWAKE)
+ st_wakeup_ack(st_gdata, LL_WAKE_UP_ACK);
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ ptr++;
+ count--;
+ continue;
+ case LL_WAKE_UP_ACK:
+ pr_debug("PM packet");
+
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ /* wake up ack received */
+ st_wakeup_ack(st_gdata, *ptr);
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ ptr++;
+ count--;
+ continue;
+		/* Unknown packet? */
+ default:
+ type = *ptr;
+ if (st_gdata->list[type] == NULL) {
+ pr_err("chip/interface misbehavior dropping"
+ " frame starting with 0x%02x", type);
+ goto done;
+
+ }
+ st_gdata->rx_skb = alloc_skb(
+ st_gdata->list[type]->max_frame_size,
+ GFP_ATOMIC);
+ skb_reserve(st_gdata->rx_skb,
+ st_gdata->list[type]->reserve);
+ /* next 2 required for BT only */
+ st_gdata->rx_skb->cb[0] = type; /*pkt_type*/
+ st_gdata->rx_skb->cb[1] = 0; /*incoming*/
+ st_gdata->rx_chnl = *ptr;
+ st_gdata->rx_state = ST_W4_HEADER;
+ st_gdata->rx_count = st_gdata->list[type]->hdr_len;
+ pr_debug("rx_count %ld\n", st_gdata->rx_count);
+		}
+ ptr++;
+ count--;
+ }
+done:
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ pr_debug("done %s", __func__);
+ return;
+}
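+
+/*
+ * The receive path above is a small state machine. For a hypothetical
+ * channel with hdr_len = 3, offset_len_in_hdr = 1 and len_size = 2:
+ *
+ *	ST_W4_PACKET_TYPE: the first byte selects the channel, an skb of
+ *			   max_frame_size is allocated, rx_count = hdr_len
+ *	ST_W4_HEADER:	   the payload length is read from the header at
+ *			   offset_len_in_hdr, rx_count = payload length
+ *	ST_W4_DATA:	   once rx_count bytes have been copied, the
+ *			   complete skb is pushed via st_send_frame()
+ */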
+
+/**
+ * st_int_dequeue - internal de-Q function.
+ * If the previous data set was not written
+ * completely, return that skb which has the pending data.
+ * In normal cases, return top of txq.
+ */
+struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
+{
+ struct sk_buff *returning_skb;
+
+ pr_debug("%s", __func__);
+ if (st_gdata->tx_skb != NULL) {
+ returning_skb = st_gdata->tx_skb;
+ st_gdata->tx_skb = NULL;
+ return returning_skb;
+ }
+ return skb_dequeue(&st_gdata->txq);
+}
+
+/**
+ * st_int_enqueue - internal Q-ing function.
+ * Will either Q the skb to txq or the tx_waitq
+ * depending on the ST LL state.
+ * If the chip is asleep, then Q it onto waitq and
+ * wakeup the chip.
+ * txq and waitq need protection since the other contexts
+ * may be sending data, waking up chip.
+ */
+void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+{
+ unsigned long flags = 0;
+
+ pr_debug("%s", __func__);
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ switch (st_ll_getstate(st_gdata)) {
+ case ST_LL_AWAKE:
+ pr_debug("ST LL is AWAKE, sending normally");
+ skb_queue_tail(&st_gdata->txq, skb);
+ break;
+ case ST_LL_ASLEEP_TO_AWAKE:
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+ break;
+ case ST_LL_AWAKE_TO_ASLEEP:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+ kfree_skb(skb);
+ break;
+ case ST_LL_ASLEEP:
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+ st_ll_wakeup(st_gdata);
+ break;
+ default:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+ kfree_skb(skb);
+ break;
+ }
+
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ pr_debug("done %s", __func__);
+ return;
+}
+
+/*
+ * internal wakeup function
+ * called from either
+ * - TTY layer when write's finished
+ * - st_write (in context of the protocol stack)
+ */
+void st_tx_wakeup(struct st_data_s *st_data)
+{
+ struct sk_buff *skb;
+ unsigned long flags; /* for irq save flags */
+ pr_debug("%s", __func__);
+ /* check for sending & set flag sending here */
+ if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) {
+ pr_debug("ST already sending");
+ /* keep sending */
+ set_bit(ST_TX_WAKEUP, &st_data->tx_state);
+ return;
+ /* TX_WAKEUP will be checked in another
+ * context
+ */
+ }
+ do { /* come back if st_tx_wakeup is set */
+ /* woke-up to write */
+ clear_bit(ST_TX_WAKEUP, &st_data->tx_state);
+ while ((skb = st_int_dequeue(st_data))) {
+ int len;
+ spin_lock_irqsave(&st_data->lock, flags);
+ /* enable wake-up from TTY */
+ set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags);
+ len = st_int_write(st_data, skb->data, skb->len);
+ skb_pull(skb, len);
+ /* if skb->len = len as expected, skb->len=0 */
+ if (skb->len) {
+ /* would be the next skb to be sent */
+ st_data->tx_skb = skb;
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ break;
+ }
+ kfree_skb(skb);
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ }
+ /* if wake-up is set in another context- restart sending */
+ } while (test_bit(ST_TX_WAKEUP, &st_data->tx_state));
+
+ /* clear flag sending */
+ clear_bit(ST_TX_SENDING, &st_data->tx_state);
+}
+
+/********************************************************************/
+/* functions called from ST KIM
+*/
+void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf)
+{
+ seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n",
+ st_gdata->protos_registered,
+ st_gdata->is_registered[0x04] == true ? 'R' : 'U',
+ st_gdata->is_registered[0x08] == true ? 'R' : 'U',
+ st_gdata->is_registered[0x09] == true ? 'R' : 'U');
+}
+
+/********************************************************************/
+/*
+ * functions called from protocol stack drivers
+ * to be EXPORT-ed
+ */
+long st_register(struct st_proto_s *new_proto)
+{
+ struct st_data_s *st_gdata;
+ long err = 0;
+ unsigned long flags = 0;
+
+ st_kim_ref(&st_gdata, 0);
+ pr_info("%s(%d) ", __func__, new_proto->chnl_id);
+ if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
+ || new_proto->reg_complete_cb == NULL) {
+ pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
+ return -EINVAL;
+ }
+
+ if (new_proto->chnl_id >= ST_MAX_CHANNELS) {
+ pr_err("chnl_id %d not supported", new_proto->chnl_id);
+ return -EPROTONOSUPPORT;
+ }
+
+ if (st_gdata->is_registered[new_proto->chnl_id] == true) {
+ pr_err("chnl_id %d already registered", new_proto->chnl_id);
+ return -EALREADY;
+ }
+
+ /* can be from process context only */
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) {
+ pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->chnl_id);
+ /* fw download in progress */
+
+ add_channel_to_table(st_gdata, new_proto);
+ st_gdata->protos_registered++;
+ new_proto->write = st_write;
+
+ set_bit(ST_REG_PENDING, &st_gdata->st_state);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return -EINPROGRESS;
+ } else if (st_gdata->protos_registered == ST_EMPTY) {
+ pr_info(" chnl_id list empty :%d ", new_proto->chnl_id);
+ set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
+ st_recv = st_kim_recv;
+
+ /* enable the ST LL - to set default chip state */
+ st_ll_enable(st_gdata);
+
+ /* release lock previously held - re-locked below */
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ /* this may take a while to complete
+ * since it involves BT fw download
+ */
+ err = st_kim_start(st_gdata->kim_data);
+ if (err != 0) {
+ clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
+ if ((st_gdata->protos_registered != ST_EMPTY) &&
+ (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
+ pr_err(" KIM failure complete callback ");
+ st_reg_complete(st_gdata, err);
+ clear_bit(ST_REG_PENDING, &st_gdata->st_state);
+ }
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
+ st_recv = st_int_recv;
+
+ /* this is where all pending registration
+ * are signalled to be complete by calling callback functions
+ */
+ if ((st_gdata->protos_registered != ST_EMPTY) &&
+ (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
+ pr_debug(" call reg complete callback ");
+ st_reg_complete(st_gdata, 0);
+ }
+ clear_bit(ST_REG_PENDING, &st_gdata->st_state);
+
+ /* check for already registered once more,
+ * since the above check is old
+ */
+ if (st_gdata->is_registered[new_proto->chnl_id] == true) {
+ pr_err(" proto %d already registered ",
+ new_proto->chnl_id);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return -EALREADY;
+ }
+
+ add_channel_to_table(st_gdata, new_proto);
+ st_gdata->protos_registered++;
+ new_proto->write = st_write;
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return err;
+ }
+ /* if fw is already downloaded & new stack registers protocol */
+ else {
+ add_channel_to_table(st_gdata, new_proto);
+ st_gdata->protos_registered++;
+ new_proto->write = st_write;
+
+ /* lock already held before entering else */
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return err;
+ }
+ pr_debug("done %s(%d) ", __func__, new_proto->chnl_id);
+}
+EXPORT_SYMBOL_GPL(st_register);
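+
+/*
+ * Minimal registration sketch for a protocol stack driver (hypothetical
+ * callbacks and values, for illustration only):
+ *
+ *	static struct st_proto_s my_proto = {
+ *		.chnl_id           = 0x04,	(the BT channel)
+ *		.max_frame_size    = 1024,
+ *		.hdr_len           = 3,
+ *		.offset_len_in_hdr = 1,
+ *		.len_size          = 2,
+ *		.reserve           = 8,
+ *		.recv              = my_recv,
+ *		.reg_complete_cb   = my_reg_complete,
+ *	};
+ *	err = st_register(&my_proto);
+ *
+ * A return of -EINPROGRESS means firmware download was triggered and the
+ * result will arrive via reg_complete_cb(); on success my_proto.write is
+ * set to st_write() for sending skbs down the shared transport.
+ */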
+
+/* to unregister a protocol -
+ * to be called from protocol stack driver
+ */
+long st_unregister(struct st_proto_s *proto)
+{
+ long err = 0;
+ unsigned long flags = 0;
+ struct st_data_s *st_gdata;
+
+ pr_debug("%s: %d ", __func__, proto->chnl_id);
+
+ st_kim_ref(&st_gdata, 0);
+ if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
+ pr_err(" chnl_id %d not supported", proto->chnl_id);
+ return -EPROTONOSUPPORT;
+ }
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ if (st_gdata->is_registered[proto->chnl_id] == false) {
+ pr_err(" chnl_id %d not registered", proto->chnl_id);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return -EPROTONOSUPPORT;
+ }
+
+ st_gdata->protos_registered--;
+ remove_channel_from_table(st_gdata, proto);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ /* paranoid check */
+ if (st_gdata->protos_registered < ST_EMPTY)
+ st_gdata->protos_registered = ST_EMPTY;
+
+ if ((st_gdata->protos_registered == ST_EMPTY) &&
+ (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
+ pr_info(" all chnl_ids unregistered ");
+
+ /* stop traffic on tty */
+ if (st_gdata->tty) {
+ tty_ldisc_flush(st_gdata->tty);
+ stop_tty(st_gdata->tty);
+ }
+
+ /* all chnl_ids now unregistered */
+ st_kim_stop(st_gdata->kim_data);
+ /* disable ST LL */
+ st_ll_disable(st_gdata);
+ }
+ return err;
+}
+
+/*
+ * called from protocol stack drivers
+ * via the write function pointer
+ */
+long st_write(struct sk_buff *skb)
+{
+ struct st_data_s *st_gdata;
+ long len;
+
+ st_kim_ref(&st_gdata, 0);
+ if (unlikely(skb == NULL || st_gdata == NULL
+ || st_gdata->tty == NULL)) {
+ pr_err("data/tty unavailable to perform write");
+ return -EINVAL;
+ }
+
+ pr_debug("%d to be written", skb->len);
+ len = skb->len;
+
+ /* st_ll to decide where to enqueue the skb */
+ st_int_enqueue(st_gdata, skb);
+ /* wake up */
+ st_tx_wakeup(st_gdata);
+
+ /* return number of bytes written */
+ return len;
+}
+
+/* for protocols making use of shared transport */
+EXPORT_SYMBOL_GPL(st_unregister);
+
+/********************************************************************/
+/*
+ * functions called from TTY layer
+ */
+static int st_tty_open(struct tty_struct *tty)
+{
+ int err = 0;
+ struct st_data_s *st_gdata;
+ pr_info("%s ", __func__);
+
+ st_kim_ref(&st_gdata, 0);
+ st_gdata->tty = tty;
+ tty->disc_data = st_gdata;
+
+	/* don't do a wakeup for now */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+ /* mem already allocated
+ */
+ tty->receive_room = 65536;
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+ /*
+ * signal to UIM via KIM that -
+ * installation of N_TI_WL ldisc is complete
+ */
+ st_kim_complete(st_gdata->kim_data);
+ pr_debug("done %s", __func__);
+ return err;
+}
+
+static void st_tty_close(struct tty_struct *tty)
+{
+ unsigned char i = ST_MAX_CHANNELS;
+ unsigned long flags = 0;
+ struct st_data_s *st_gdata = tty->disc_data;
+
+ pr_info("%s ", __func__);
+
+ /* TODO:
+ * if a protocol has been registered & line discipline
+ * un-installed for some reason - what should be done ?
+ */
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ for (i = ST_BT; i < ST_MAX_CHANNELS; i++) {
+ if (st_gdata->is_registered[i] == true)
+ pr_err("%d not un-registered", i);
+ st_gdata->list[i] = NULL;
+ st_gdata->is_registered[i] = false;
+ }
+ st_gdata->protos_registered = 0;
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ /*
+ * signal to UIM via KIM that -
+ * N_TI_WL ldisc is un-installed
+ */
+ st_kim_complete(st_gdata->kim_data);
+ st_gdata->tty = NULL;
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ /* empty out txq and tx_waitq */
+ skb_queue_purge(&st_gdata->txq);
+ skb_queue_purge(&st_gdata->tx_waitq);
+ /* reset the TTY Rx states of ST */
+ st_gdata->rx_count = 0;
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kfree_skb(st_gdata->rx_skb);
+ st_gdata->rx_skb = NULL;
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ pr_debug("%s: done ", __func__);
+}
+
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+ char *tty_flags, int count)
+{
+#ifdef VERBOSE
+ print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
+ 16, 1, data, count, 0);
+#endif
+
+ /*
+ * if fw download is in progress then route incoming data
+ * to KIM for validation
+ */
+ st_recv(tty->disc_data, data, count);
+ pr_debug("done %s", __func__);
+}
+
+/* wake-up function called in from the TTY layer;
+ * from within it, the ST internal wakeup function is called
+ */
+static void st_tty_wakeup(struct tty_struct *tty)
+{
+ struct st_data_s *st_gdata = tty->disc_data;
+ pr_debug("%s ", __func__);
+	/* don't do a wakeup for now */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+ /* call our internal wakeup */
+ st_tx_wakeup((void *)st_gdata);
+}
+
+static void st_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct st_data_s *st_gdata = tty->disc_data;
+ pr_debug("%s ", __func__);
+
+ kfree_skb(st_gdata->tx_skb);
+ st_gdata->tx_skb = NULL;
+
+ tty->ops->flush_buffer(tty);
+ return;
+}
+
+static struct tty_ldisc_ops st_ldisc_ops = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_st",
+ .open = st_tty_open,
+ .close = st_tty_close,
+ .receive_buf = st_tty_receive,
+ .write_wakeup = st_tty_wakeup,
+ .flush_buffer = st_tty_flush_buffer,
+ .owner = THIS_MODULE
+};
+
+/********************************************************************/
+int st_core_init(struct st_data_s **core_data)
+{
+ struct st_data_s *st_gdata;
+ long err;
+
+ err = tty_register_ldisc(N_TI_WL, &st_ldisc_ops);
+ if (err) {
+ pr_err("error registering %d line discipline %ld",
+ N_TI_WL, err);
+ return err;
+ }
+ pr_debug("registered n_shared line discipline");
+
+ st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
+ if (!st_gdata) {
+ pr_err("memory allocation failed");
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc %ld", err);
+ err = -ENOMEM;
+ return err;
+ }
+
+ /* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's
+ * will be pushed in this queue for actual transmission.
+ */
+ skb_queue_head_init(&st_gdata->txq);
+ skb_queue_head_init(&st_gdata->tx_waitq);
+
+ /* Locking used in st_int_enqueue() to avoid multiple execution */
+ spin_lock_init(&st_gdata->lock);
+
+ err = st_ll_init(st_gdata);
+ if (err) {
+ pr_err("error during st_ll initialization(%ld)", err);
+ kfree(st_gdata);
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc");
+ return err;
+ }
+ *core_data = st_gdata;
+ return 0;
+}
+
+void st_core_exit(struct st_data_s *st_gdata)
+{
+ long err;
+ /* internal module cleanup */
+ err = st_ll_deinit(st_gdata);
+ if (err)
+ pr_err("error during deinit of ST LL %ld", err);
+
+ if (st_gdata != NULL) {
+ /* Free ST Tx Qs and skbs */
+ skb_queue_purge(&st_gdata->txq);
+ skb_queue_purge(&st_gdata->tx_waitq);
+ kfree_skb(st_gdata->rx_skb);
+ kfree_skb(st_gdata->tx_skb);
+ /* TTY ldisc cleanup */
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc %ld", err);
+ /* free the global data pointer */
+ kfree(st_gdata);
+ }
+}
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
new file mode 100644
index 00000000..7c14f8fd
--- /dev/null
+++ b/drivers/misc/ti-st/st_kim.c
@@ -0,0 +1,844 @@
+/*
+ * Shared Transport Line discipline driver Core
+ * Init Manager module responsible for GPIO control
+ * and firmware download
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define pr_fmt(fmt) "(stk) :" fmt
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/sysfs.h>
+#include <linux/tty.h>
+
+#include <linux/skbuff.h>
+#include <linux/ti_wilink_st.h>
+#include <linux/module.h>
+
+
+#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
+static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
+
+/**********************************************************************/
+/* internal functions */
+
+/**
+ * st_get_plat_device -
+ * function which returns the reference to the platform device
+ *	requested by id. As of now only 1 such device exists (id=0);
+ *	the context requesting the reference gets the id from either
+ *	a. the protocol driver which is registering, or
+ *	b. the tty device which is opened.
+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+ return st_kim_devices[id];
+}
+
+/**
+ * validate_firmware_response -
+ *	function to check whether the firmware response was proper.
+ *	In case of error, don't complete, so that the wait for a proper
+ *	response times out.
+ */
+void validate_firmware_response(struct kim_data_s *kim_gdata)
+{
+ struct sk_buff *skb = kim_gdata->rx_skb;
+ if (unlikely(skb->data[5] != 0)) {
+ pr_err("no proper response during fw download");
+ pr_err("data6 %x", skb->data[5]);
+ kfree_skb(skb);
+ return; /* keep waiting for the proper response */
+ }
+	/* because of all the script being downloaded */
+ complete_all(&kim_gdata->kim_rcvd);
+ kfree_skb(skb);
+}
+
+/* check the data length received inside kim_int_recv;
+ * most often hits the last case, updating the state to wait for data
+ */
+static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
+{
+ register int room = skb_tailroom(kim_gdata->rx_skb);
+
+ pr_debug("len %d room %d", len, room);
+
+ if (!len) {
+ validate_firmware_response(kim_gdata);
+ } else if (len > room) {
+ /* Received packet's payload length is larger.
+ * We can't accommodate it in created skb.
+ */
+ pr_err("Data length is too large len %d room %d", len,
+ room);
+ kfree_skb(kim_gdata->rx_skb);
+ } else {
+ /* Packet header has non-zero payload length and
+		 * we have enough space in created skb. Let's read
+ * payload data */
+ kim_gdata->rx_state = ST_W4_DATA;
+ kim_gdata->rx_count = len;
+ return len;
+ }
+
+ /* Change ST LL state to continue to process next
+ * packet */
+ kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kim_gdata->rx_skb = NULL;
+ kim_gdata->rx_count = 0;
+
+ return 0;
+}
+
+/**
+ * kim_int_recv - receive function called during firmware download
+ * firmware download responses on different UART drivers
+ *	have been observed to arrive split across multiple
+ *	tty_receive calls, hence the logic
+ */
+void kim_int_recv(struct kim_data_s *kim_gdata,
+ const unsigned char *data, long count)
+{
+ const unsigned char *ptr;
+ int len = 0, type = 0;
+ unsigned char *plen;
+
+ pr_debug("%s", __func__);
+ /* Decode received bytes here */
+ ptr = data;
+ if (unlikely(ptr == NULL)) {
+ pr_err(" received null from TTY ");
+ return;
+ }
+
+ while (count) {
+ if (kim_gdata->rx_count) {
+ len = min_t(unsigned int, kim_gdata->rx_count, count);
+ memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
+ kim_gdata->rx_count -= len;
+ count -= len;
+ ptr += len;
+
+ if (kim_gdata->rx_count)
+ continue;
+
+ /* Check ST RX state machine , where are we? */
+ switch (kim_gdata->rx_state) {
+ /* Waiting for complete packet ? */
+ case ST_W4_DATA:
+ pr_debug("Complete pkt received");
+ validate_firmware_response(kim_gdata);
+ kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kim_gdata->rx_skb = NULL;
+ continue;
+ /* Waiting for Bluetooth event header ? */
+ case ST_W4_HEADER:
+ plen =
+ (unsigned char *)&kim_gdata->rx_skb->data[1];
+ pr_debug("event hdr: plen 0x%02x\n", *plen);
+ kim_check_data_len(kim_gdata, *plen);
+ continue;
+ } /* end of switch */
+ } /* end of if rx_state */
+ switch (*ptr) {
+ /* Bluetooth event packet? */
+ case 0x04:
+ kim_gdata->rx_state = ST_W4_HEADER;
+ kim_gdata->rx_count = 2;
+ type = *ptr;
+ break;
+ default:
+ pr_info("unknown packet");
+ ptr++;
+ count--;
+ continue;
+ }
+ ptr++;
+ count--;
+ kim_gdata->rx_skb =
+ alloc_skb(1024+8, GFP_ATOMIC);
+ if (!kim_gdata->rx_skb) {
+ pr_err("can't allocate mem for new packet");
+ kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kim_gdata->rx_count = 0;
+ return;
+ }
+ skb_reserve(kim_gdata->rx_skb, 8);
+ kim_gdata->rx_skb->cb[0] = 4;
+ kim_gdata->rx_skb->cb[1] = 0;
+
+ }
+ return;
+}
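+
+/*
+ * During download the chip answers with HCI event packets: a 0x04 type
+ * byte (not stored in the skb), then a 2 byte event header whose second
+ * byte (data[1] above) carries the parameter length, then the parameters
+ * themselves. data[5] of the completed skb is the command status byte
+ * checked by validate_firmware_response().
+ */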
+
+static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
+{
+ unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
+ const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
+
+ pr_debug("%s", __func__);
+
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
+ if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
+ pr_err("kim: couldn't write 4 bytes");
+ return -EIO;
+ }
+
+ if (!wait_for_completion_timeout
+ (&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
+ pr_err(" waiting for ver info- timed out ");
+ return -ETIMEDOUT;
+ }
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
+
+ version =
+ MAKEWORD(kim_gdata->resp_buffer[13],
+ kim_gdata->resp_buffer[14]);
+ chip = (version & 0x7C00) >> 10;
+ min_ver = (version & 0x007F);
+ maj_ver = (version & 0x0380) >> 7;
+
+ if (version & 0x8000)
+ maj_ver |= 0x0008;
+
+ sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);
+
+ /* to be accessed later via sysfs entry */
+ kim_gdata->version.full = version;
+ kim_gdata->version.chip = chip;
+ kim_gdata->version.maj_ver = maj_ver;
+ kim_gdata->version.min_ver = min_ver;
+
+ pr_info("%s", bts_scr_name);
+ return 0;
+}
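+
+/*
+ * Worked example (hypothetical response): for version word 0x1D1F,
+ *	chip    = (0x1D1F & 0x7C00) >> 10 = 7
+ *	maj_ver = (0x1D1F & 0x0380) >> 7  = 2
+ *	min_ver =  0x1D1F & 0x007F        = 31
+ * giving the firmware name "TIInit_7.2.31.bts".
+ */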
+
+void skip_change_remote_baud(unsigned char **ptr, long *len)
+{
+ unsigned char *nxt_action, *cur_action;
+ cur_action = *ptr;
+
+ nxt_action = cur_action + sizeof(struct bts_action) +
+ ((struct bts_action *) cur_action)->size;
+
+ if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) {
+ pr_err("invalid action after change remote baud command");
+ } else {
+ *ptr = *ptr + sizeof(struct bts_action) +
+ ((struct bts_action *)cur_action)->size;
+ *len = *len - (sizeof(struct bts_action) +
+ ((struct bts_action *)cur_action)->size);
+ /* warn user on not commenting these in firmware */
+ pr_warn("skipping the wait event of change remote baud");
+ }
+}
+
+/**
+ * download_firmware -
+ * internal function which parses through the .bts firmware
+ *	script file and interprets only SEND and DELAY actions as of now
+ */
+static long download_firmware(struct kim_data_s *kim_gdata)
+{
+ long err = 0;
+ long len = 0;
+ unsigned char *ptr = NULL;
+ unsigned char *action_ptr = NULL;
+ unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */
+ int wr_room_space;
+ int cmd_size;
+ unsigned long timeout;
+
+ err = read_local_version(kim_gdata, bts_scr_name);
+ if (err != 0) {
+ pr_err("kim: failed to read local ver");
+ return err;
+ }
+ err =
+ request_firmware(&kim_gdata->fw_entry, bts_scr_name,
+ &kim_gdata->kim_pdev->dev);
+ if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
+ (kim_gdata->fw_entry->size == 0))) {
+ pr_err(" request_firmware failed(errno %ld) for %s", err,
+ bts_scr_name);
+ return -EINVAL;
+ }
+ ptr = (void *)kim_gdata->fw_entry->data;
+ len = kim_gdata->fw_entry->size;
+	/* skip the bts_header to strip out the magic number and
+ * version
+ */
+ ptr += sizeof(struct bts_header);
+ len -= sizeof(struct bts_header);
+
+ while (len > 0 && ptr) {
+ pr_debug(" action size %d, type %d ",
+ ((struct bts_action *)ptr)->size,
+ ((struct bts_action *)ptr)->type);
+
+ switch (((struct bts_action *)ptr)->type) {
+ case ACTION_SEND_COMMAND: /* action send */
+ pr_debug("S");
+ action_ptr = &(((struct bts_action *)ptr)->data[0]);
+ if (unlikely
+ (((struct hci_command *)action_ptr)->opcode ==
+ 0xFF36)) {
+ /* ignore remote change
+ * baud rate HCI VS command */
+ pr_warn("change remote baud"
+ " rate command in firmware");
+ skip_change_remote_baud(&ptr, &len);
+ break;
+ }
+ /*
+ * Make sure we have enough free space in uart
+ * tx buffer to write current firmware command
+ */
+ cmd_size = ((struct bts_action *)ptr)->size;
+ timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME);
+ do {
+ wr_room_space =
+ st_get_uart_wr_room(kim_gdata->core_data);
+ if (wr_room_space < 0) {
+ pr_err("Unable to get free "
+ "space info from uart tx buffer");
+ release_firmware(kim_gdata->fw_entry);
+ return wr_room_space;
+ }
+ mdelay(1); /* wait 1ms before checking room */
+ } while ((wr_room_space < cmd_size) &&
+ time_before(jiffies, timeout));
+
+ /* Timeout happened ? */
+ if (time_after_eq(jiffies, timeout)) {
+ pr_err("Timeout while waiting for free "
+ "free space in uart tx buffer");
+ release_firmware(kim_gdata->fw_entry);
+ return -ETIMEDOUT;
+ }
+ /* reinit completion before sending for the
+ * relevant wait
+ */
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
+
+ /*
+ * Free space found in uart buffer, call st_int_write
+ * to send current firmware command to the uart tx
+ * buffer.
+ */
+ err = st_int_write(kim_gdata->core_data,
+ ((struct bts_action_send *)action_ptr)->data,
+ ((struct bts_action *)ptr)->size);
+ if (unlikely(err < 0)) {
+ release_firmware(kim_gdata->fw_entry);
+ return err;
+ }
+ /*
+ * Check number of bytes written to the uart tx buffer
+ * and requested command write size
+ */
+ if (err != cmd_size) {
+ pr_err("Number of bytes written to uart "
+ "tx buffer are not matching with "
+ "requested cmd write size");
+ release_firmware(kim_gdata->fw_entry);
+ return -EIO;
+ }
+ break;
+ case ACTION_WAIT_EVENT: /* wait */
+ pr_debug("W");
+ if (!wait_for_completion_timeout
+ (&kim_gdata->kim_rcvd,
+ msecs_to_jiffies(CMD_RESP_TIME))) {
+ pr_err("response timeout during fw download ");
+ /* timed out */
+ release_firmware(kim_gdata->fw_entry);
+ return -ETIMEDOUT;
+ }
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
+ break;
+ case ACTION_DELAY: /* sleep */
+ pr_info("sleep command in scr");
+ action_ptr = &(((struct bts_action *)ptr)->data[0]);
+ mdelay(((struct bts_action_delay *)action_ptr)->msec);
+ break;
+ }
+ len =
+ len - (sizeof(struct bts_action) +
+ ((struct bts_action *)ptr)->size);
+ ptr =
+ ptr + sizeof(struct bts_action) +
+ ((struct bts_action *)ptr)->size;
+ }
+ /* fw download complete */
+ release_firmware(kim_gdata->fw_entry);
+ return 0;
+}
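+
+/*
+ * Layout of the .bts script parsed above: a struct bts_header (magic and
+ * version, skipped), followed by back-to-back struct bts_action records,
+ * each { type, size, data[size] }. Only ACTION_SEND_COMMAND,
+ * ACTION_WAIT_EVENT and ACTION_DELAY are interpreted; a send of the HCI
+ * VS opcode 0xFF36 (change remote baud rate) and its wait are skipped.
+ */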
+
+/**********************************************************************/
+/* functions called from ST core */
+/* called from ST Core when ST_REG_IN_PROGRESS (registration in progress);
+ * the data received here can be
+ * 1. a response to read local version
+ * 2. send/recv traffic of the firmware download
+ */
+void st_kim_recv(void *disc_data, const unsigned char *data, long count)
+{
+ struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
+ struct kim_data_s *kim_gdata = st_gdata->kim_data;
+
+ /* copy to local buffer */
+ if (unlikely(data[4] == 0x01 && data[5] == 0x10 && data[0] == 0x04)) {
+ /* must be the read_ver_cmd */
+ memcpy(kim_gdata->resp_buffer, data, count);
+ complete_all(&kim_gdata->kim_rcvd);
+ return;
+ } else {
+ kim_int_recv(kim_gdata, data, count);
+ /* either completes or times out */
+ }
+ return;
+}
+
+/* to signal completion of line discipline installation
+ * called from ST Core, upon tty_open
+ */
+void st_kim_complete(void *kim_data)
+{
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
+ complete(&kim_gdata->ldisc_installed);
+}
+
+/**
+ * st_kim_start - called from ST Core upon 1st registration
+ * This involves toggling the chip enable gpio, reading
+ * the firmware version from chip, forming the fw file name
+ * based on the chip version, requesting the fw, parsing it
+ *	and performing the download (send/recv).
+ */
+long st_kim_start(void *kim_data)
+{
+ long err = 0;
+ long retry = POR_RETRY_COUNT;
+ struct ti_st_plat_data *pdata;
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
+
+ pr_info(" %s", __func__);
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+
+ do {
+ /* platform specific enabling code here */
+ if (pdata->chip_enable)
+ pdata->chip_enable(kim_gdata);
+
+ /* Configure BT nShutdown to HIGH state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(5); /* FIXME: a proper toggle */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(100);
+ /* re-initialize the completion */
+ INIT_COMPLETION(kim_gdata->ldisc_installed);
+ /* send notification to UIM */
+ kim_gdata->ldisc_install = 1;
+ pr_info("ldisc_install = 1");
+ sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
+ NULL, "install");
+ /* wait for ldisc to be installed */
+ err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
+ msecs_to_jiffies(LDISC_TIME));
+ if (!err) {
+ /* ldisc installation timeout,
+ * flush uart, power cycle BT_EN */
+ pr_err("ldisc installation timeout");
+ err = st_kim_stop(kim_gdata);
+ continue;
+ } else {
+ /* ldisc installed now */
+ pr_info("line discipline installed");
+ err = download_firmware(kim_gdata);
+ if (err != 0) {
+ /* ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN */
+ pr_err("download firmware failed");
+ err = st_kim_stop(kim_gdata);
+ continue;
+ } else { /* on success don't retry */
+ break;
+ }
+ }
+ } while (retry--);
+ return err;
+}
+
+/**
+ * st_kim_stop - stop communication with chip.
+ *	This can be called from ST Core/KIM on
+ *	(a) the last un-register, when the chip need not be powered
+ *	    there-after, or
+ *	(b) failure to either install the ldisc or download the firmware.
+ *	The function is responsible for (a) notifying UIM about
+ *	un-installation, (b) flushing the UART if the ldisc was installed,
+ *	(c) resetting BT_EN - pulling down nshutdown at the end, and
+ *	(d) invoking the platform's chip disabling routine.
+ */
+long st_kim_stop(void *kim_data)
+{
+ long err = 0;
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
+ struct ti_st_plat_data *pdata =
+ kim_gdata->kim_pdev->dev.platform_data;
+ struct tty_struct *tty = kim_gdata->core_data->tty;
+
+ INIT_COMPLETION(kim_gdata->ldisc_installed);
+
+ if (tty) { /* can be called before ldisc is installed */
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+ tty->ops->flush_buffer(tty);
+ }
+
+ /* send uninstall notification to UIM */
+ pr_info("ldisc_install = 0");
+ kim_gdata->ldisc_install = 0;
+ sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install");
+
+ /* wait for ldisc to be un-installed */
+ err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
+ msecs_to_jiffies(LDISC_TIME));
+ if (!err) { /* timeout */
+ pr_err(" timed out waiting for ldisc to be un-installed");
+ return -ETIMEDOUT;
+ }
+
+ /* By default configure BT nShutdown to LOW state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
+ /* platform specific disable */
+ if (pdata->chip_disable)
+ pdata->chip_disable(kim_gdata);
+ return err;
+}
+
+/**********************************************************************/
+/* functions called from subsystems */
+/* called when debugfs entry is read from */
+
+static int show_version(struct seq_file *s, void *unused)
+{
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
+ seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full,
+ kim_gdata->version.chip, kim_gdata->version.maj_ver,
+ kim_gdata->version.min_ver);
+ return 0;
+}
+
+static int show_list(struct seq_file *s, void *unused)
+{
+ struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
+ kim_st_list_protocols(kim_gdata->core_data, s);
+ return 0;
+}
+
+static ssize_t show_install(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", kim_data->ldisc_install);
+}
+
+#ifdef DEBUG
+static ssize_t store_dev_name(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing dev name >%s<", buf);
+ strncpy(kim_data->dev_name, buf, count);
+ pr_debug("stored dev name >%s<", kim_data->dev_name);
+ return count;
+}
+
+static ssize_t store_baud_rate(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing baud rate >%s<", buf);
+ sscanf(buf, "%ld", &kim_data->baud_rate);
+ pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
+ return count;
+}
+#endif /* if DEBUG */
+
+static ssize_t show_dev_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", kim_data->dev_name);
+}
+
+static ssize_t show_baud_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ return sprintf(buf, "%ld\n", kim_data->baud_rate);
+}
+
+static ssize_t show_flow_cntrl(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", kim_data->flow_cntrl);
+}
+
+/* structures specific for sysfs entries */
+static struct kobj_attribute ldisc_install =
+__ATTR(install, 0444, (void *)show_install, NULL);
+
+static struct kobj_attribute uart_dev_name =
+#ifdef DEBUG /* TODO: move this to debug-fs if possible */
+__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
+#else
+__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
+#endif
+
+static struct kobj_attribute uart_baud_rate =
+#ifdef DEBUG /* TODO: move to debugfs */
+__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
+#else
+__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
+#endif
+
+static struct kobj_attribute uart_flow_cntrl =
+__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
+
+static struct attribute *uim_attrs[] = {
+ &ldisc_install.attr,
+ &uart_dev_name.attr,
+ &uart_baud_rate.attr,
+ &uart_flow_cntrl.attr,
+ NULL,
+};
+
+static struct attribute_group uim_attr_grp = {
+ .attrs = uim_attrs,
+};
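+
+/*
+ * The "install" sysfs entry is the handshake with the user-space UIM:
+ * UIM poll()s it and, on a change notified via sysfs_notify(), reads it
+ * back (hypothetical path):
+ *
+ *	# cat /sys/devices/platform/kim/install
+ *	1	-> UIM opens the UART and installs the N_TI_WL ldisc
+ *	0	-> UIM uninstalls the ldisc and closes the UART
+ */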
+
+/**
+ * st_kim_ref - reference the core's data
+ * This references the per-ST platform device in the arch/xx/
+ * board-xx.c file.
+ * This would enable multiple such platform devices to exist
+ * on a given platform
+ */
+void st_kim_ref(struct st_data_s **core_data, int id)
+{
+ struct platform_device *pdev;
+ struct kim_data_s *kim_gdata;
+ /* get kim_gdata reference from platform device */
+ pdev = st_get_plat_device(id);
+ if (!pdev) {
+ *core_data = NULL;
+ return;
+ }
+ kim_gdata = dev_get_drvdata(&pdev->dev);
+ *core_data = kim_gdata->core_data;
+}
+
+static int kim_version_open(struct inode *i, struct file *f)
+{
+ return single_open(f, show_version, i->i_private);
+}
+
+static int kim_list_open(struct inode *i, struct file *f)
+{
+ return single_open(f, show_list, i->i_private);
+}
+
+static const struct file_operations version_debugfs_fops = {
+ /* version info */
+ .open = kim_version_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static const struct file_operations list_debugfs_fops = {
+ /* protocols info */
+ .open = kim_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**********************************************************************/
+/* functions called from platform device driver subsystem
+ * need to have a relevant platform device entry in the platform's
+ * board-*.c file
+ */
+
+struct dentry *kim_debugfs_dir;
+static int kim_probe(struct platform_device *pdev)
+{
+ long status;
+ struct kim_data_s *kim_gdata;
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+
+ if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
+ /* multiple devices could exist */
+ st_kim_devices[pdev->id] = pdev;
+ } else {
+ /* platform's sure about existence of 1 device */
+ st_kim_devices[0] = pdev;
+ }
+
+ kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
+ if (!kim_gdata) {
+ pr_err("no mem to allocate");
+ return -ENOMEM;
+ }
+ dev_set_drvdata(&pdev->dev, kim_gdata);
+
+ status = st_core_init(&kim_gdata->core_data);
+ if (status != 0) {
+ pr_err(" ST core init failed");
+ return -EIO;
+ }
+ /* refer to itself */
+ kim_gdata->core_data->kim_data = kim_gdata;
+
+ /* Claim the chip enable nShutdown gpio from the system */
+ kim_gdata->nshutdown = pdata->nshutdown_gpio;
+	status = gpio_request(kim_gdata->nshutdown, "kim");
+	if (unlikely(status)) {
+		pr_err("gpio %ld request failed", kim_gdata->nshutdown);
+		st_core_exit(kim_gdata->core_data);
+		kfree(kim_gdata);
+		return status;
+	}
+
+ /* Configure nShutdown GPIO as output=0 */
+	status = gpio_direction_output(kim_gdata->nshutdown, 0);
+	if (unlikely(status)) {
+		pr_err("unable to configure gpio %ld", kim_gdata->nshutdown);
+		gpio_free(kim_gdata->nshutdown);
+		st_core_exit(kim_gdata->core_data);
+		kfree(kim_gdata);
+		return status;
+	}
+	/* get reference of pdev for request_firmware() */
+ kim_gdata->kim_pdev = pdev;
+ init_completion(&kim_gdata->kim_rcvd);
+ init_completion(&kim_gdata->ldisc_installed);
+
+	status = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp);
+	if (status) {
+		pr_err("failed to create sysfs entries");
+		gpio_free(kim_gdata->nshutdown);
+		st_core_exit(kim_gdata->core_data);
+		kfree(kim_gdata);
+		return status;
+	}
+
+ /* copying platform data */
+ strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN);
+ kim_gdata->flow_cntrl = pdata->flow_cntrl;
+ kim_gdata->baud_rate = pdata->baud_rate;
+ pr_info("sysfs entries created\n");
+
+	kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
+	if (IS_ERR_OR_NULL(kim_debugfs_dir)) {
+		pr_err("debugfs entries creation failed");
+		kim_debugfs_dir = NULL;
+		sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
+		gpio_free(kim_gdata->nshutdown);
+		st_core_exit(kim_gdata->core_data);
+		kfree(kim_gdata);
+		return -EIO;
+	}
+
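+	/*
+	 * With debugfs mounted at the usual /sys/kernel/debug, the two
+	 * files below appear as /sys/kernel/debug/ti-st/version and
+	 * /sys/kernel/debug/ti-st/protocols, both read-only seq_file views.
+	 */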
+ debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
+ kim_gdata, &version_debugfs_fops);
+ debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
+ kim_gdata, &list_debugfs_fops);
+	pr_info("debugfs entries created\n");
+ return 0;
+}
+
+static int kim_remove(struct platform_device *pdev)
+{
+ /* free the GPIOs requested */
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct kim_data_s *kim_gdata;
+
+ kim_gdata = dev_get_drvdata(&pdev->dev);
+
+ /* Free the Bluetooth/FM/GPIO
+ * nShutdown gpio from the system
+ */
+ gpio_free(pdata->nshutdown_gpio);
+ pr_info("nshutdown GPIO Freed");
+
+ debugfs_remove_recursive(kim_debugfs_dir);
+ sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
+ pr_info("sysfs entries removed");
+
+ kim_gdata->kim_pdev = NULL;
+ st_core_exit(kim_gdata->core_data);
+
+ kfree(kim_gdata);
+ kim_gdata = NULL;
+ return 0;
+}
+
+int kim_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->suspend)
+ return pdata->suspend(pdev, state);
+
+ return -EOPNOTSUPP;
+}
+
+int kim_resume(struct platform_device *pdev)
+{
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->resume)
+ return pdata->resume(pdev);
+
+ return -EOPNOTSUPP;
+}
+
+/**********************************************************************/
+/* entry point for ST KIM module, called in from ST Core */
+static struct platform_driver kim_platform_driver = {
+ .probe = kim_probe,
+ .remove = kim_remove,
+ .suspend = kim_suspend,
+ .resume = kim_resume,
+ .driver = {
+ .name = "kim",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kim_platform_driver);
+
+MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
+MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
new file mode 100644
index 00000000..1ff460a8
--- /dev/null
+++ b/drivers/misc/ti-st/st_ll.c
@@ -0,0 +1,169 @@
+/*
+ * Shared Transport driver
+ * HCI-LL module responsible for TI proprietary HCI_LL protocol
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define pr_fmt(fmt) "(stll) :" fmt
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ti_wilink_st.h>
+
+/**********************************************************************/
+/* internal functions */
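+
+/*
+ * Rough sketch of the HCI-LL power states driven by the handlers in
+ * this file (host = this driver, chip = the TI combo chip):
+ *
+ *  ST_LL_AWAKE  --chip: SLEEP_IND, host replies SLEEP_ACK-->  ST_LL_ASLEEP
+ *  ST_LL_ASLEEP --host: WAKE_UP_IND-->  ST_LL_ASLEEP_TO_AWAKE
+ *  ST_LL_ASLEEP_TO_AWAKE --chip: WAKE_UP_ACK-->  ST_LL_AWAKE
+ *  ST_LL_ASLEEP --chip: WAKE_UP_IND, host replies WAKE_UP_ACK--> ST_LL_AWAKE
+ */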
+static void send_ll_cmd(struct st_data_s *st_data,
+	unsigned char cmd)
+{
+	pr_debug("%s: writing %x", __func__, cmd);
+	st_int_write(st_data, &cmd, 1);
+}
+
+static void ll_device_want_to_sleep(struct st_data_s *st_data)
+{
+ struct kim_data_s *kim_data;
+ struct ti_st_plat_data *pdata;
+
+ pr_debug("%s", __func__);
+ /* sanity check */
+	if (st_data->ll_state != ST_LL_AWAKE)
+		pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND "
+			"in state %ld", st_data->ll_state);
+
+ send_ll_cmd(st_data, LL_SLEEP_ACK);
+ /* update state */
+ st_data->ll_state = ST_LL_ASLEEP;
+
+ /* communicate to platform about chip asleep */
+ kim_data = st_data->kim_data;
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ if (pdata->chip_asleep)
+ pdata->chip_asleep(NULL);
+}
+
+static void ll_device_want_to_wakeup(struct st_data_s *st_data)
+{
+ struct kim_data_s *kim_data;
+ struct ti_st_plat_data *pdata;
+
+	/* different actions in different states */
+ switch (st_data->ll_state) {
+ case ST_LL_ASLEEP:
+ send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */
+ break;
+ case ST_LL_ASLEEP_TO_AWAKE:
+ /* duplicate wake_ind */
+ pr_err("duplicate wake_ind while waiting for Wake ack");
+ break;
+ case ST_LL_AWAKE:
+ /* duplicate wake_ind */
+ pr_err("duplicate wake_ind already AWAKE");
+ break;
+ case ST_LL_AWAKE_TO_ASLEEP:
+ /* duplicate wake_ind */
+ pr_err("duplicate wake_ind");
+ break;
+ }
+ /* update state */
+ st_data->ll_state = ST_LL_AWAKE;
+
+ /* communicate to platform about chip wakeup */
+ kim_data = st_data->kim_data;
+ pdata = kim_data->kim_pdev->dev.platform_data;
+	if (pdata->chip_awake)
+		pdata->chip_awake(NULL);
+}
+
+/**********************************************************************/
+/* functions invoked by ST Core */
+
+/* called when ST Core wants to
+ * enable ST LL */
+void st_ll_enable(struct st_data_s *ll)
+{
+ ll->ll_state = ST_LL_AWAKE;
+}
+
+/* called when ST Core /local module wants to
+ * disable ST LL */
+void st_ll_disable(struct st_data_s *ll)
+{
+ ll->ll_state = ST_LL_INVALID;
+}
+
+/* called when ST Core wants to update the state */
+void st_ll_wakeup(struct st_data_s *ll)
+{
+ if (likely(ll->ll_state != ST_LL_AWAKE)) {
+ send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */
+ ll->ll_state = ST_LL_ASLEEP_TO_AWAKE;
+ } else {
+ /* don't send the duplicate wake_indication */
+		pr_err("Chip already AWAKE");
+ }
+}
+
+/* called when ST Core wants the state */
+unsigned long st_ll_getstate(struct st_data_s *ll)
+{
+ pr_debug(" returning state %ld", ll->ll_state);
+ return ll->ll_state;
+}
+
+/* called from ST Core, when a PM related packet arrives */
+unsigned long st_ll_sleep_state(struct st_data_s *st_data,
+ unsigned char cmd)
+{
+ switch (cmd) {
+ case LL_SLEEP_IND: /* sleep ind */
+ pr_debug("sleep indication recvd");
+ ll_device_want_to_sleep(st_data);
+ break;
+ case LL_SLEEP_ACK: /* sleep ack */
+ pr_err("sleep ack rcvd: host shouldn't");
+ break;
+ case LL_WAKE_UP_IND: /* wake ind */
+ pr_debug("wake indication recvd");
+ ll_device_want_to_wakeup(st_data);
+ break;
+ case LL_WAKE_UP_ACK: /* wake ack */
+ pr_debug("wake ack rcvd");
+ st_data->ll_state = ST_LL_AWAKE;
+ break;
+ default:
+		pr_err("unknown input/state");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Called from ST CORE to initialize ST LL */
+long st_ll_init(struct st_data_s *ll)
+{
+ /* set state to invalid */
+ ll->ll_state = ST_LL_INVALID;
+ return 0;
+}
+
+/* Called from ST CORE to de-initialize ST LL */
+long st_ll_deinit(struct st_data_s *ll)
+{
+ return 0;
+}
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
new file mode 100644
index 00000000..5acbba12
--- /dev/null
+++ b/drivers/misc/ti_dac7512.c
@@ -0,0 +1,90 @@
+/*
+ * dac7512.c - Linux kernel module for
+ * Texas Instruments DAC7512
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+
+#define DAC7512_DRV_NAME "dac7512"
+#define DRIVER_VERSION "1.0"
+
+static ssize_t dac7512_store_val(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char tmp[2];
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ tmp[0] = val >> 8;
+ tmp[1] = val & 0xff;
+ spi_write(spi, tmp, sizeof(tmp));
+ return count;
+}
+
+static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
+
+static struct attribute *dac7512_attributes[] = {
+ &dev_attr_value.attr,
+ NULL
+};
+
+static const struct attribute_group dac7512_attr_group = {
+ .attrs = dac7512_attributes,
+};
+
+static int __devinit dac7512_probe(struct spi_device *spi)
+{
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
+}
+
+static int __devexit dac7512_remove(struct spi_device *spi)
+{
+ sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
+ return 0;
+}
+
+static struct spi_driver dac7512_driver = {
+ .driver = {
+ .name = DAC7512_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dac7512_probe,
+ .remove = __devexit_p(dac7512_remove),
+};
+
+module_spi_driver(dac7512_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("DAC7512 16-bit DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
new file mode 100644
index 00000000..ba247902
--- /dev/null
+++ b/drivers/misc/tifm_7xx1.c
@@ -0,0 +1,454 @@
+/*
+ * tifm_7xx1.c - TI FlashMedia driver
+ *
+ * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/tifm.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "tifm_7xx1"
+#define DRIVER_VERSION "0.8"
+
+#define TIFM_IRQ_ENABLE 0x80000000
+#define TIFM_IRQ_SOCKMASK(x) (x)
+#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
+#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
+#define TIFM_IRQ_SETALL 0xffffffff
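+
+/*
+ * FM_INTERRUPT_STATUS layout implied by the masks above: bits 0..7 are
+ * per-socket status, bits 8..15 card-change events and bits 16..23 FIFO
+ * events for the corresponding socket, with bit 31 acting as the global
+ * interrupt summary/enable flag.
+ */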
+
+static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm,
+ struct tifm_dev *sock)
+{
+}
+
+static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fm->lock, flags);
+ fm->socket_change_set |= 1 << sock->socket_id;
+ tifm_queue_work(&fm->media_switcher);
+ spin_unlock_irqrestore(&fm->lock, flags);
+}
+
+static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
+{
+ struct tifm_adapter *fm = dev_id;
+ struct tifm_dev *sock;
+ unsigned int irq_status, cnt;
+
+ spin_lock(&fm->lock);
+ irq_status = readl(fm->addr + FM_INTERRUPT_STATUS);
+ if (irq_status == 0 || irq_status == (~0)) {
+ spin_unlock(&fm->lock);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & TIFM_IRQ_ENABLE) {
+ writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+
+ for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+ sock = fm->sockets[cnt];
+ if (sock) {
+ if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1))
+ sock->data_event(sock);
+ if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1))
+ sock->card_event(sock);
+ }
+ }
+
+ fm->socket_change_set |= irq_status
+ & ((1 << fm->num_sockets) - 1);
+ }
+ writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
+
+ if (fm->finish_me)
+ complete_all(fm->finish_me);
+ else if (!fm->socket_change_set)
+ writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
+ else
+ tifm_queue_work(&fm->media_switcher);
+
+ spin_unlock(&fm->lock);
+ return IRQ_HANDLED;
+}
+
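+/*
+ * Power-cycle a socket and report what is plugged in. The value
+ * returned is the media-type nibble from SOCK_PRESENT_STATE, which
+ * tifm_core's tifm_media_type_name() maps as 1 = SmartMedia/xD,
+ * 2 = MemoryStick, 3 = MMC/SD; 0 means the socket is empty.
+ */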
+static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr)
+{
+ unsigned int s_state;
+ int cnt;
+
+ writel(0x0e00, sock_addr + SOCK_CONTROL);
+
+ for (cnt = 16; cnt <= 256; cnt <<= 1) {
+ if (!(TIFM_SOCK_STATE_POWERED
+ & readl(sock_addr + SOCK_PRESENT_STATE)))
+ break;
+
+ msleep(cnt);
+ }
+
+ s_state = readl(sock_addr + SOCK_PRESENT_STATE);
+ if (!(TIFM_SOCK_STATE_OCCUPIED & s_state))
+ return 0;
+
+ writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED,
+ sock_addr + SOCK_CONTROL);
+
+ /* xd needs some extra time before power on */
+ if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7)
+ == TIFM_TYPE_XD)
+ msleep(40);
+
+ writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00,
+ sock_addr + SOCK_CONTROL);
+ /* wait for power to stabilize */
+ msleep(20);
+ for (cnt = 16; cnt <= 256; cnt <<= 1) {
+ if ((TIFM_SOCK_STATE_POWERED
+ & readl(sock_addr + SOCK_PRESENT_STATE)))
+ break;
+
+ msleep(cnt);
+ }
+
+ writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED),
+ sock_addr + SOCK_CONTROL);
+
+ return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
+}
+
+static inline void tifm_7xx1_sock_power_off(char __iomem *sock_addr)
+{
+ writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL),
+ sock_addr + SOCK_CONTROL);
+}
+
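+/*
+ * BAR0 layout implied by the arithmetic below: host registers at
+ * offset 0, then one 1 KB register window per socket, with socket N
+ * starting at (N + 1) << 10, i.e. 0x400, 0x800, ...
+ */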
+static inline char __iomem *
+tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
+{
+ return base_addr + ((sock_num + 1) << 10);
+}
+
+static void tifm_7xx1_switch_media(struct work_struct *work)
+{
+ struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
+ media_switcher);
+ struct tifm_dev *sock;
+ char __iomem *sock_addr;
+ unsigned long flags;
+ unsigned char media_id;
+ unsigned int socket_change_set, cnt;
+
+ spin_lock_irqsave(&fm->lock, flags);
+ socket_change_set = fm->socket_change_set;
+ fm->socket_change_set = 0;
+
+ dev_dbg(fm->dev.parent, "checking media set %x\n",
+ socket_change_set);
+
+ if (!socket_change_set) {
+ spin_unlock_irqrestore(&fm->lock, flags);
+ return;
+ }
+
+ for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+ if (!(socket_change_set & (1 << cnt)))
+ continue;
+ sock = fm->sockets[cnt];
+ if (sock) {
+ printk(KERN_INFO
+ "%s : demand removing card from socket %u:%u\n",
+ dev_name(&fm->dev), fm->id, cnt);
+ fm->sockets[cnt] = NULL;
+ sock_addr = sock->addr;
+ spin_unlock_irqrestore(&fm->lock, flags);
+ device_unregister(&sock->dev);
+ spin_lock_irqsave(&fm->lock, flags);
+ tifm_7xx1_sock_power_off(sock_addr);
+ writel(0x0e00, sock_addr + SOCK_CONTROL);
+ }
+
+ spin_unlock_irqrestore(&fm->lock, flags);
+
+ media_id = tifm_7xx1_toggle_sock_power(
+ tifm_7xx1_sock_addr(fm->addr, cnt));
+
+		/* tifm_alloc_device() will check if media_id is valid */
+ sock = tifm_alloc_device(fm, cnt, media_id);
+ if (sock) {
+ sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt);
+
+ if (!device_register(&sock->dev)) {
+ spin_lock_irqsave(&fm->lock, flags);
+ if (!fm->sockets[cnt]) {
+ fm->sockets[cnt] = sock;
+ sock = NULL;
+ }
+ spin_unlock_irqrestore(&fm->lock, flags);
+ }
+ if (sock)
+ tifm_free_device(&sock->dev);
+ }
+ spin_lock_irqsave(&fm->lock, flags);
+ }
+
+ writel(TIFM_IRQ_FIFOMASK(socket_change_set)
+ | TIFM_IRQ_CARDMASK(socket_change_set),
+ fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+
+ writel(TIFM_IRQ_FIFOMASK(socket_change_set)
+ | TIFM_IRQ_CARDMASK(socket_change_set),
+ fm->addr + FM_SET_INTERRUPT_ENABLE);
+
+ writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&fm->lock, flags);
+}
+
+#ifdef CONFIG_PM
+
+static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ struct tifm_adapter *fm = pci_get_drvdata(dev);
+ int cnt;
+
+ dev_dbg(&dev->dev, "suspending host\n");
+
+ for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+ if (fm->sockets[cnt])
+ tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
+ }
+
+ pci_save_state(dev);
+ pci_enable_wake(dev, pci_choose_state(dev, state), 0);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+ return 0;
+}
+
+static int tifm_7xx1_resume(struct pci_dev *dev)
+{
+ struct tifm_adapter *fm = pci_get_drvdata(dev);
+ int rc;
+ unsigned int good_sockets = 0, bad_sockets = 0;
+ unsigned long flags;
+ unsigned char new_ids[fm->num_sockets];
+ DECLARE_COMPLETION_ONSTACK(finish_resume);
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+ pci_set_master(dev);
+
+ dev_dbg(&dev->dev, "resuming host\n");
+
+ for (rc = 0; rc < fm->num_sockets; rc++)
+ new_ids[rc] = tifm_7xx1_toggle_sock_power(
+ tifm_7xx1_sock_addr(fm->addr, rc));
+ spin_lock_irqsave(&fm->lock, flags);
+ for (rc = 0; rc < fm->num_sockets; rc++) {
+ if (fm->sockets[rc]) {
+ if (fm->sockets[rc]->type == new_ids[rc])
+ good_sockets |= 1 << rc;
+ else
+ bad_sockets |= 1 << rc;
+ }
+ }
+
+ writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
+ fm->addr + FM_SET_INTERRUPT_ENABLE);
+ dev_dbg(&dev->dev, "change sets on resume: good %x, bad %x\n",
+ good_sockets, bad_sockets);
+
+ fm->socket_change_set = 0;
+ if (good_sockets) {
+ fm->finish_me = &finish_resume;
+ spin_unlock_irqrestore(&fm->lock, flags);
+ rc = wait_for_completion_timeout(&finish_resume, HZ);
+ dev_dbg(&dev->dev, "wait returned %d\n", rc);
+ writel(TIFM_IRQ_FIFOMASK(good_sockets)
+ | TIFM_IRQ_CARDMASK(good_sockets),
+ fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+ writel(TIFM_IRQ_FIFOMASK(good_sockets)
+ | TIFM_IRQ_CARDMASK(good_sockets),
+ fm->addr + FM_SET_INTERRUPT_ENABLE);
+ spin_lock_irqsave(&fm->lock, flags);
+ fm->finish_me = NULL;
+ fm->socket_change_set ^= good_sockets & fm->socket_change_set;
+ }
+
+ fm->socket_change_set |= bad_sockets;
+ if (fm->socket_change_set)
+ tifm_queue_work(&fm->media_switcher);
+
+ spin_unlock_irqrestore(&fm->lock, flags);
+ writel(TIFM_IRQ_ENABLE,
+ fm->addr + FM_SET_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+#else
+
+#define tifm_7xx1_suspend NULL
+#define tifm_7xx1_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
+ struct tifm_dev *sock)
+{
+ return 0;
+}
+
+static int tifm_7xx1_has_ms_pif(struct tifm_adapter *fm, struct tifm_dev *sock)
+{
+ if (((fm->num_sockets == 4) && (sock->socket_id == 2))
+ || ((fm->num_sockets == 2) && (sock->socket_id == 0)))
+ return 1;
+
+ return 0;
+}
+
+static int tifm_7xx1_probe(struct pci_dev *dev,
+ const struct pci_device_id *dev_id)
+{
+ struct tifm_adapter *fm;
+ int pci_dev_busy = 0;
+ int rc;
+
+ rc = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
+ pci_set_master(dev);
+
+ rc = pci_request_regions(dev, DRIVER_NAME);
+ if (rc) {
+ pci_dev_busy = 1;
+ goto err_out;
+ }
+
+ pci_intx(dev, 1);
+
+ fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM
+ ? 4 : 2, &dev->dev);
+ if (!fm) {
+ rc = -ENOMEM;
+ goto err_out_int;
+ }
+
+ INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media);
+ fm->eject = tifm_7xx1_eject;
+ fm->has_ms_pif = tifm_7xx1_has_ms_pif;
+ pci_set_drvdata(dev, fm);
+
+ fm->addr = pci_ioremap_bar(dev, 0);
+ if (!fm->addr)
+ goto err_out_free;
+
+ rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm);
+ if (rc)
+ goto err_out_unmap;
+
+ rc = tifm_add_adapter(fm);
+ if (rc)
+ goto err_out_irq;
+
+ writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
+ fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+ writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
+ fm->addr + FM_SET_INTERRUPT_ENABLE);
+ return 0;
+
+err_out_irq:
+ free_irq(dev->irq, fm);
+err_out_unmap:
+ iounmap(fm->addr);
+err_out_free:
+ pci_set_drvdata(dev, NULL);
+ tifm_free_adapter(fm);
+err_out_int:
+ pci_intx(dev, 0);
+ pci_release_regions(dev);
+err_out:
+ if (!pci_dev_busy)
+ pci_disable_device(dev);
+ return rc;
+}
+
+static void tifm_7xx1_remove(struct pci_dev *dev)
+{
+ struct tifm_adapter *fm = pci_get_drvdata(dev);
+ int cnt;
+
+ fm->eject = tifm_7xx1_dummy_eject;
+ fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
+ writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+ mmiowb();
+ free_irq(dev->irq, fm);
+
+ tifm_remove_adapter(fm);
+
+ for (cnt = 0; cnt < fm->num_sockets; cnt++)
+ tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));
+
+ pci_set_drvdata(dev, NULL);
+
+ iounmap(fm->addr);
+ pci_intx(dev, 0);
+ pci_release_regions(dev);
+
+ pci_disable_device(dev);
+ tifm_free_adapter(fm);
+}
+
+static struct pci_device_id tifm_7xx1_pci_tbl[] = {
+ { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
+ { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0 },
+ { }
+};
+
+static struct pci_driver tifm_7xx1_driver = {
+ .name = DRIVER_NAME,
+ .id_table = tifm_7xx1_pci_tbl,
+ .probe = tifm_7xx1_probe,
+ .remove = tifm_7xx1_remove,
+ .suspend = tifm_7xx1_suspend,
+ .resume = tifm_7xx1_resume,
+};
+
+static int __init tifm_7xx1_init(void)
+{
+ return pci_register_driver(&tifm_7xx1_driver);
+}
+
+static void __exit tifm_7xx1_exit(void)
+{
+ pci_unregister_driver(&tifm_7xx1_driver);
+}
+
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("TI FlashMedia host driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl);
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(tifm_7xx1_init);
+module_exit(tifm_7xx1_exit);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
new file mode 100644
index 00000000..0bd5349b
--- /dev/null
+++ b/drivers/misc/tifm_core.c
@@ -0,0 +1,368 @@
+/*
+ * tifm_core.c - TI FlashMedia driver
+ *
+ * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/tifm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "tifm_core"
+#define DRIVER_VERSION "0.8"
+
+static struct workqueue_struct *workqueue;
+static DEFINE_IDR(tifm_adapter_idr);
+static DEFINE_SPINLOCK(tifm_adapter_lock);
+
+static const char *tifm_media_type_name(unsigned char type, unsigned char nt)
+{
+ const char *card_type_name[3][3] = {
+ { "SmartMedia/xD", "MemoryStick", "MMC/SD" },
+ { "XD", "MS", "SD"},
+ { "xd", "ms", "sd"}
+ };
+
+ if (nt > 2 || type < 1 || type > 3)
+ return NULL;
+ return card_type_name[nt][type - 1];
+}
+
+static int tifm_dev_match(struct tifm_dev *sock, struct tifm_device_id *id)
+{
+ if (sock->type == id->type)
+ return 1;
+ return 0;
+}
+
+static int tifm_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver,
+ driver);
+ struct tifm_device_id *ids = fm_drv->id_table;
+
+ if (ids) {
+ while (ids->type) {
+ if (tifm_dev_match(sock, ids))
+ return 1;
+ ++ids;
+ }
+ }
+ return 0;
+}
+
+static int tifm_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+
+ if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tifm_device_probe(struct device *dev)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
+ driver);
+ int rc = -ENODEV;
+
+ get_device(dev);
+ if (dev->driver && drv->probe) {
+ rc = drv->probe(sock);
+ if (!rc)
+ return 0;
+ }
+ put_device(dev);
+ return rc;
+}
+
+static void tifm_dummy_event(struct tifm_dev *sock)
+{
+ return;
+}
+
+static int tifm_device_remove(struct device *dev)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
+ driver);
+
+ if (dev->driver && drv->remove) {
+ sock->card_event = tifm_dummy_event;
+ sock->data_event = tifm_dummy_event;
+ drv->remove(sock);
+ sock->dev.driver = NULL;
+ }
+
+ put_device(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int tifm_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
+ driver);
+
+ if (dev->driver && drv->suspend)
+ return drv->suspend(sock, state);
+ return 0;
+}
+
+static int tifm_device_resume(struct device *dev)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
+ driver);
+
+ if (dev->driver && drv->resume)
+ return drv->resume(sock);
+ return 0;
+}
+
+#else
+
+#define tifm_device_suspend NULL
+#define tifm_device_resume NULL
+
+#endif /* CONFIG_PM */
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ return sprintf(buf, "%x", sock->type);
+}
+
+static struct device_attribute tifm_dev_attrs[] = {
+ __ATTR(type, S_IRUGO, type_show, NULL),
+ __ATTR_NULL
+};
+
+static struct bus_type tifm_bus_type = {
+ .name = "tifm",
+ .dev_attrs = tifm_dev_attrs,
+ .match = tifm_bus_match,
+ .uevent = tifm_uevent,
+ .probe = tifm_device_probe,
+ .remove = tifm_device_remove,
+ .suspend = tifm_device_suspend,
+ .resume = tifm_device_resume
+};
+
+static void tifm_free(struct device *dev)
+{
+ struct tifm_adapter *fm = container_of(dev, struct tifm_adapter, dev);
+
+ kfree(fm);
+}
+
+static struct class tifm_adapter_class = {
+ .name = "tifm_adapter",
+ .dev_release = tifm_free
+};
+
+struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
+ struct device *dev)
+{
+ struct tifm_adapter *fm;
+
+ fm = kzalloc(sizeof(struct tifm_adapter)
+ + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL);
+ if (fm) {
+ fm->dev.class = &tifm_adapter_class;
+ fm->dev.parent = dev;
+ device_initialize(&fm->dev);
+ spin_lock_init(&fm->lock);
+ fm->num_sockets = num_sockets;
+ }
+ return fm;
+}
+EXPORT_SYMBOL(tifm_alloc_adapter);
+
+int tifm_add_adapter(struct tifm_adapter *fm)
+{
+ int rc;
+
+ if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(&tifm_adapter_lock);
+ rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
+ spin_unlock(&tifm_adapter_lock);
+ if (rc)
+ return rc;
+
+ dev_set_name(&fm->dev, "tifm%u", fm->id);
+ rc = device_add(&fm->dev);
+ if (rc) {
+ spin_lock(&tifm_adapter_lock);
+ idr_remove(&tifm_adapter_idr, fm->id);
+ spin_unlock(&tifm_adapter_lock);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(tifm_add_adapter);
+
+void tifm_remove_adapter(struct tifm_adapter *fm)
+{
+ unsigned int cnt;
+
+ flush_workqueue(workqueue);
+ for (cnt = 0; cnt < fm->num_sockets; ++cnt) {
+ if (fm->sockets[cnt])
+ device_unregister(&fm->sockets[cnt]->dev);
+ }
+
+ spin_lock(&tifm_adapter_lock);
+ idr_remove(&tifm_adapter_idr, fm->id);
+ spin_unlock(&tifm_adapter_lock);
+ device_del(&fm->dev);
+}
+EXPORT_SYMBOL(tifm_remove_adapter);
+
+void tifm_free_adapter(struct tifm_adapter *fm)
+{
+ put_device(&fm->dev);
+}
+EXPORT_SYMBOL(tifm_free_adapter);
+
+void tifm_free_device(struct device *dev)
+{
+ struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ kfree(sock);
+}
+EXPORT_SYMBOL(tifm_free_device);
+
+struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id,
+ unsigned char type)
+{
+ struct tifm_dev *sock = NULL;
+
+ if (!tifm_media_type_name(type, 0))
+ return sock;
+
+ sock = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
+ if (sock) {
+ spin_lock_init(&sock->lock);
+ sock->type = type;
+ sock->socket_id = id;
+ sock->card_event = tifm_dummy_event;
+ sock->data_event = tifm_dummy_event;
+
+ sock->dev.parent = fm->dev.parent;
+ sock->dev.bus = &tifm_bus_type;
+ sock->dev.dma_mask = fm->dev.parent->dma_mask;
+ sock->dev.release = tifm_free_device;
+
+ dev_set_name(&sock->dev, "tifm_%s%u:%u",
+ tifm_media_type_name(type, 2), fm->id, id);
+ printk(KERN_INFO DRIVER_NAME
+ ": %s card detected in socket %u:%u\n",
+ tifm_media_type_name(type, 0), fm->id, id);
+ }
+ return sock;
+}
+EXPORT_SYMBOL(tifm_alloc_device);
+
+void tifm_eject(struct tifm_dev *sock)
+{
+ struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
+ fm->eject(fm, sock);
+}
+EXPORT_SYMBOL(tifm_eject);
+
+int tifm_has_ms_pif(struct tifm_dev *sock)
+{
+ struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
+ return fm->has_ms_pif(fm, sock);
+}
+EXPORT_SYMBOL(tifm_has_ms_pif);
+
+int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
+ int direction)
+{
+ return pci_map_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+}
+EXPORT_SYMBOL(tifm_map_sg);
+
+void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
+ int direction)
+{
+ pci_unmap_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+}
+EXPORT_SYMBOL(tifm_unmap_sg);
+
+void tifm_queue_work(struct work_struct *work)
+{
+ queue_work(workqueue, work);
+}
+EXPORT_SYMBOL(tifm_queue_work);
+
+int tifm_register_driver(struct tifm_driver *drv)
+{
+ drv->driver.bus = &tifm_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(tifm_register_driver);
+
+void tifm_unregister_driver(struct tifm_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(tifm_unregister_driver);
+
+static int __init tifm_init(void)
+{
+ int rc;
+
+ workqueue = create_freezable_workqueue("tifm");
+ if (!workqueue)
+ return -ENOMEM;
+
+ rc = bus_register(&tifm_bus_type);
+
+ if (rc)
+ goto err_out_wq;
+
+ rc = class_register(&tifm_adapter_class);
+ if (!rc)
+ return 0;
+
+ bus_unregister(&tifm_bus_type);
+
+err_out_wq:
+ destroy_workqueue(workqueue);
+
+ return rc;
+}
+
+static void __exit tifm_exit(void)
+{
+ class_unregister(&tifm_adapter_class);
+ bus_unregister(&tifm_bus_type);
+ destroy_workqueue(workqueue);
+}
+
+subsys_initcall(tifm_init);
+module_exit(tifm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("TI FlashMedia core driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
new file mode 100644
index 00000000..0beb298a
--- /dev/null
+++ b/drivers/misc/tsl2550.c
@@ -0,0 +1,462 @@
+/*
+ * tsl2550.c - Linux kernel modules for ambient light sensor
+ *
+ * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+
+#define TSL2550_DRV_NAME "tsl2550"
+#define DRIVER_VERSION "1.2"
+
+/*
+ * Defines
+ */
+
+#define TSL2550_POWER_DOWN 0x00
+#define TSL2550_POWER_UP 0x03
+#define TSL2550_STANDARD_RANGE 0x18
+#define TSL2550_EXTENDED_RANGE 0x1d
+#define TSL2550_READ_ADC0 0x43
+#define TSL2550_READ_ADC1 0x83
+
+/*
+ * Structs
+ */
+
+struct tsl2550_data {
+ struct i2c_client *client;
+ struct mutex update_lock;
+
+ unsigned int power_state:1;
+ unsigned int operating_mode:1;
+};
+
+/*
+ * Global data
+ */
+
+static const u8 TSL2550_MODE_RANGE[2] = {
+ TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE,
+};
+
+/*
+ * Management functions
+ */
+
+static int tsl2550_set_operating_mode(struct i2c_client *client, int mode)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+
+ int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]);
+
+ data->operating_mode = mode;
+
+ return ret;
+}
+
+static int tsl2550_set_power_state(struct i2c_client *client, int state)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ if (state == 0)
+ ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN);
+ else {
+ ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
+
+ /* On power up we should reset operating mode also... */
+ tsl2550_set_operating_mode(client, data->operating_mode);
+ }
+
+ data->power_state = state;
+
+ return ret;
+}
+
+static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, cmd);
+ if (ret < 0)
+ return ret;
+ if (!(ret & 0x80))
+ return -EAGAIN;
+ return ret & 0x7f; /* remove the "valid" bit */
+}
+
+/*
+ * LUX calculation
+ */
+
+#define TSL2550_MAX_LUX 1846
+
+static const u8 ratio_lut[] = {
+ 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 98, 98, 98, 98, 98,
+ 98, 98, 97, 97, 97, 97, 97, 96,
+ 96, 96, 96, 95, 95, 95, 94, 94,
+ 93, 93, 93, 92, 92, 91, 91, 90,
+ 89, 89, 88, 87, 87, 86, 85, 84,
+ 83, 82, 81, 80, 79, 78, 77, 75,
+ 74, 73, 71, 69, 68, 66, 64, 62,
+ 60, 58, 56, 54, 52, 49, 47, 44,
+ 42, 41, 40, 40, 39, 39, 38, 38,
+ 37, 37, 37, 36, 36, 36, 35, 35,
+ 35, 35, 34, 34, 34, 34, 33, 33,
+ 33, 33, 32, 32, 32, 32, 32, 31,
+ 31, 31, 31, 31, 30, 30, 30, 30,
+ 30,
+};
+
+static const u16 count_lut[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 26, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46,
+ 49, 53, 57, 61, 65, 69, 73, 77,
+ 81, 85, 89, 93, 97, 101, 105, 109,
+ 115, 123, 131, 139, 147, 155, 163, 171,
+ 179, 187, 195, 203, 211, 219, 227, 235,
+ 247, 263, 279, 295, 311, 327, 343, 359,
+ 375, 391, 407, 423, 439, 455, 471, 487,
+ 511, 543, 575, 607, 639, 671, 703, 735,
+ 767, 799, 831, 863, 895, 927, 959, 991,
+ 1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487,
+ 1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999,
+ 2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991,
+ 3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015,
+};
+
+/*
+ * This function is described into Taos TSL2550 Designer's Notebook
+ * pages 2, 3.
+ */
+static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
+{
+ unsigned int lux;
+
+ /* Look up count from channel values */
+ u16 c0 = count_lut[ch0];
+ u16 c1 = count_lut[ch1];
+
+ /*
+ * Calculate ratio.
+ * Note: the "128" is a scaling factor
+ */
+ u8 r = 128;
+
+	/* Avoid division by zero; count 1 cannot be greater than count 0 */
+	if (c1 <= c0) {
+		if (c0) {
+			r = c1 * 128 / c0;
+
+			/* Calculate LUX */
+			lux = ((c0 - c1) * ratio_lut[r]) / 256;
+		} else {
+			lux = 0;
+		}
+	} else {
+		return -EAGAIN;
+	}
+
+ /* LUX range check */
+ return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
+}
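+
+/*
+ * Worked example of the lookup above: for raw readings ch0 = 80 and
+ * ch1 = 60, count_lut gives c0 = 511 and c1 = 211, so
+ * r = 211 * 128 / 511 = 52 and ratio_lut[52] = 92, yielding
+ * lux = (511 - 211) * 92 / 256 = 107 (times 5 in extended range mode).
+ */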
+
+/*
+ * SysFS support
+ */
+
+static ssize_t tsl2550_show_power_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sprintf(buf, "%u\n", data->power_state);
+}
+
+static ssize_t tsl2550_store_power_state(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+ int ret;
+
+	if (val > 1)	/* val is unsigned, so only check the upper bound */
+		return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = tsl2550_set_power_state(client, val);
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+ tsl2550_show_power_state, tsl2550_store_power_state);
+
+static ssize_t tsl2550_show_operating_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sprintf(buf, "%u\n", data->operating_mode);
+}
+
+static ssize_t tsl2550_store_operating_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+ int ret;
+
+	if (val > 1)	/* val is unsigned, so only check the upper bound */
+		return -EINVAL;
+
+ if (data->power_state == 0)
+ return -EBUSY;
+
+ mutex_lock(&data->update_lock);
+ ret = tsl2550_set_operating_mode(client, val);
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
+ tsl2550_show_operating_mode, tsl2550_store_operating_mode);
+
+static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ u8 ch0, ch1;
+ int ret;
+
+ ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0);
+ if (ret < 0)
+ return ret;
+ ch0 = ret;
+
+ ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1);
+ if (ret < 0)
+ return ret;
+ ch1 = ret;
+
+ /* Do the job */
+ ret = tsl2550_calculate_lux(ch0, ch1);
+ if (ret < 0)
+ return ret;
+ if (data->operating_mode == 1)
+ ret *= 5;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t tsl2550_show_lux1_input(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ /* No LUX data if not operational */
+ if (!data->power_state)
+ return -EBUSY;
+
+ mutex_lock(&data->update_lock);
+ ret = __tsl2550_show_lux(client, buf);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR(lux1_input, S_IRUGO,
+ tsl2550_show_lux1_input, NULL);
+
+static struct attribute *tsl2550_attributes[] = {
+ &dev_attr_power_state.attr,
+ &dev_attr_operating_mode.attr,
+ &dev_attr_lux1_input.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2550_attr_group = {
+ .attrs = tsl2550_attributes,
+};
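+
+/*
+ * These attributes land in the i2c client's sysfs directory; assuming
+ * the TSL2550's fixed 0x39 address on bus 0, that would look like
+ * /sys/bus/i2c/devices/0-0039/lux1_input (read lux),
+ * .../operating_mode (0 = standard, 1 = extended) and .../power_state.
+ */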
+
+/*
+ * Initialization function
+ */
+
+static int tsl2550_init_client(struct i2c_client *client)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ int err;
+
+	/*
+	 * Probe the chip: power it up, then read back the power-up code
+	 * (0x03) to verify that the device responded.
+	 */
+ err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP);
+ if (err < 0)
+ return err;
+ if (err != TSL2550_POWER_UP)
+ return -ENODEV;
+ data->power_state = 1;
+
+ /* Set the default operating mode */
+ err = i2c_smbus_write_byte(client,
+ TSL2550_MODE_RANGE[data->operating_mode]);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+
+static struct i2c_driver tsl2550_driver;
+static int __devinit tsl2550_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct tsl2550_data *data;
+ int *opmode, err = 0;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ err = -EIO;
+ goto exit;
+ }
+
+ data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ /* Check platform data */
+ opmode = client->dev.platform_data;
+ if (opmode) {
+ if (*opmode < 0 || *opmode > 1) {
+ dev_err(&client->dev, "invalid operating_mode (%d)\n",
+ *opmode);
+ err = -EINVAL;
+ goto exit_kfree;
+ }
+ data->operating_mode = *opmode;
+ } else
+ data->operating_mode = 0; /* default mode is standard */
+ dev_info(&client->dev, "%s operating mode\n",
+ data->operating_mode ? "extended" : "standard");
+
+ mutex_init(&data->update_lock);
+
+ /* Initialize the TSL2550 chip */
+ err = tsl2550_init_client(client);
+ if (err)
+ goto exit_kfree;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group);
+ if (err)
+ goto exit_kfree;
+
+ dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+
+ return 0;
+
+exit_kfree:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit tsl2550_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
+
+ /* Power down the device */
+ tsl2550_set_power_state(client, 0);
+
+ kfree(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int tsl2550_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ return tsl2550_set_power_state(client, 0);
+}
+
+static int tsl2550_resume(struct i2c_client *client)
+{
+ return tsl2550_set_power_state(client, 1);
+}
+
+#else
+
+#define tsl2550_suspend NULL
+#define tsl2550_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id tsl2550_id[] = {
+ { "tsl2550", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tsl2550_id);
+
+static struct i2c_driver tsl2550_driver = {
+ .driver = {
+ .name = TSL2550_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .suspend = tsl2550_suspend,
+ .resume = tsl2550_resume,
+ .probe = tsl2550_probe,
+ .remove = __devexit_p(tsl2550_remove),
+ .id_table = tsl2550_id,
+};
+
+module_i2c_driver(tsl2550_driver);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("TSL2550 ambient light sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 00000000..2141124a
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,156 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid) {
+ unsigned long flags;
+ struct uid_stat *entry;
+
+ spin_lock_irqsave(&uid_lock, flags);
+ list_for_each_entry(entry, &uid_list, link) {
+ if (entry->uid == uid) {
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return NULL;
+}
+
+static int tcp_snd_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int bytes;
+ char *p = page;
+ struct uid_stat *uid_entry = (struct uid_stat *) data;
+ if (!data)
+ return 0;
+
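+	/*
+	 * Counters start at INT_MIN (see create_stat()); adding INT_MIN
+	 * again cancels the offset modulo 2^32, so "bytes" reports the
+	 * raw transfer count as an unsigned value.
+	 */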
+ bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
+ p += sprintf(p, "%u\n", bytes);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+static int tcp_rcv_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int bytes;
+ char *p = page;
+ struct uid_stat *uid_entry = (struct uid_stat *) data;
+ if (!data)
+ return 0;
+
+ bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
+ p += sprintf(p, "%u\n", bytes);
+ len = (p - page) - off;
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+ return len;
+}
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid) {
+ unsigned long flags;
+ char uid_s[32];
+ struct uid_stat *new_uid;
+ struct proc_dir_entry *entry;
+
+ /* Create the uid stat struct and append it to the list. */
+ if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL)
+ return NULL;
+
+ new_uid->uid = uid;
+ /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+ atomic_set(&new_uid->tcp_rcv, INT_MIN);
+ atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+ spin_lock_irqsave(&uid_lock, flags);
+ list_add_tail(&new_uid->link, &uid_list);
+ spin_unlock_irqrestore(&uid_lock, flags);
+
+ sprintf(uid_s, "%d", uid);
+ entry = proc_mkdir(uid_s, parent);
+
+ /* Keep reference to uid_stat so we know what uid to read stats from. */
+ create_proc_read_entry("tcp_snd", S_IRUGO, entry , tcp_snd_read_proc,
+ (void *) new_uid);
+
+ create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc,
+ (void *) new_uid);
+
+ return new_uid;
+}
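+
+/*
+ * Resulting procfs layout, given the "uid_stat" parent created in
+ * uid_stat_init(): /proc/uid_stat/<uid>/tcp_snd and
+ * /proc/uid_stat/<uid>/tcp_rcv, each printing a single counter.
+ */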
+
+int uid_stat_tcp_snd(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ if ((entry = find_uid_stat(uid)) == NULL &&
+ ((entry = create_stat(uid)) == NULL)) {
+ return -1;
+ }
+ atomic_add(size, &entry->tcp_snd);
+ return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ if ((entry = find_uid_stat(uid)) == NULL &&
+ ((entry = create_stat(uid)) == NULL)) {
+ return -1;
+ }
+ atomic_add(size, &entry->tcp_rcv);
+ return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+ parent = proc_mkdir("uid_stat", NULL);
+	if (!parent) {
+		pr_err("uid_stat: failed to create proc entry\n");
+		return -ENOMEM;
+	}
+ return 0;
+}
+
+__initcall(uid_stat_init);
diff --git a/drivers/misc/viatel/Kconfig b/drivers/misc/viatel/Kconfig
new file mode 100755
index 00000000..a341da3f
--- /dev/null
+++ b/drivers/misc/viatel/Kconfig
@@ -0,0 +1,14 @@
+menu "Viatelecom OEM function support"
+
+config VIATELECOM_SYNC_CBP
+	bool "AP Sync CBP"
+	help
+	  The synchronous mechanism for data exchange between the AP and the CBP.
+
+config VIATELECOM_POWER_CBP
+	bool "AP Power CBP"
+	help
+	  Enable this driver to support control functions, such as reset and
+	  power, between the AP and the CBP.
+
+endmenu
diff --git a/drivers/misc/viatel/Makefile b/drivers/misc/viatel/Makefile
new file mode 100755
index 00000000..fe30dd44
--- /dev/null
+++ b/drivers/misc/viatel/Makefile
@@ -0,0 +1,3 @@
+obj-y += core.o oem.o sync.o # cp_reset.o
+obj-$(CONFIG_VIATELECOM_POWER_CBP) += power.o
diff --git a/drivers/misc/viatel/core.c b/drivers/misc/viatel/core.c
new file mode 100755
index 00000000..d06d7a48
--- /dev/null
+++ b/drivers/misc/viatel/core.c
@@ -0,0 +1,49 @@
+/*
+ * core.c
+ *
+ * VIA CBP driver for Linux
+ *
+ * Copyright (C) 2011 VIA TELECOM Corporation, Inc.
+ * Author: VIA TELECOM Corporation, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+
+static struct kobject *viatel_kobj;
+
+struct kobject *viatel_kobject_add(const char *name)
+{
+ struct kobject *kobj = NULL;
+ if(viatel_kobj){
+ kobj = kobject_create_and_add(name, viatel_kobj);
+ }
+
+ return kobj;
+}
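+
+/*
+ * kobject_create_and_add() with a NULL parent (see viatel_core_init()
+ * below) places the "viatel" kobject at the sysfs root, so callers of
+ * viatel_kobject_add("foo") should end up with /sys/viatel/foo.
+ */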
+
+static int __init viatel_core_init(void)
+{
+	viatel_kobj = kobject_create_and_add("viatel", NULL);
+	if (!viatel_kobj)
+		return -ENOMEM;
+
+	return 0;
+}
+
+arch_initcall(viatel_core_init);
diff --git a/drivers/misc/viatel/core.h b/drivers/misc/viatel/core.h
new file mode 100755
index 00000000..d6d323cc
--- /dev/null
+++ b/drivers/misc/viatel/core.h
@@ -0,0 +1,21 @@
+/*
+ * core.h
+ *
+ * VIA CBP driver for Linux
+ *
+ * Copyright (C) 2011 VIA TELECOM Corporation, Inc.
+ * Author: VIA TELECOM Corporation, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#ifndef VIATEL_CORE_H
+#define VIATEL_CORE_H
+
+extern struct kobject *viatel_kobject_add(const char *name);
+#endif
diff --git a/drivers/misc/viatel/oem.c b/drivers/misc/viatel/oem.c
new file mode 100755
index 00000000..105c4d7d
--- /dev/null
+++ b/drivers/misc/viatel/oem.c
@@ -0,0 +1,752 @@
+/*
+ * viatel_cbp_oem.c
+ *
+ * VIA CBP driver for Linux
+ *
+ * Copyright (C) 2011 VIA TELECOM Corporation, Inc.
+ * Author: VIA TELECOM Corporation, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/interrupt.h>
+#include <mach/viatel.h>
+#include <linux/gpio.h>
+#include <mach/wmt_iomux.h>
+#include <mach/hardware.h>
+#include "linux/delay.h"
+
+#if 1
+
+//#define VIATEL_DEBUG
+#ifdef VIATEL_DEBUG
+#define wmt_dbg(fmt, arg...) printk(fmt, ##arg)
+#else
+#define wmt_dbg(fmt, arg...)
+#endif
+
+int isviatelcom = 0;
+int wk1_high_active = 0;
+int gpio_viatel_4wire[4];
+int ap_ready_always = 0;
+unsigned int wakeup_reg_val;
+unsigned int wakeup_shift_bit;
+int wakeup_type_mask;
+
+extern int wmt_getsyspara(char *varname, char *varval, int *varlen);
+
+int oem_gpio_convert_init(void)
+{
+ char buf[256];
+ int varlen = 256;
+ if( wmt_getsyspara("wmt.modem.viatel.4wire",buf,&varlen) == 0)
+ {
+ sscanf(buf,"%d:%d:%d:%d",&gpio_viatel_4wire[GPIO_VIATEL_USB_AP_RDY],
+ &gpio_viatel_4wire[GPIO_VIATEL_USB_MDM_RDY],
+ &gpio_viatel_4wire[GPIO_VIATEL_USB_AP_WAKE_MDM],
+ &gpio_viatel_4wire[GPIO_VIATEL_USB_MDM_WAKE_AP]);
+
+ printk("4wire %d:%d:%d:%d\n",gpio_viatel_4wire[GPIO_VIATEL_USB_AP_RDY],
+ gpio_viatel_4wire[GPIO_VIATEL_USB_MDM_RDY],
+ gpio_viatel_4wire[GPIO_VIATEL_USB_AP_WAKE_MDM],
+ gpio_viatel_4wire[GPIO_VIATEL_USB_MDM_WAKE_AP]);
+
+ isviatelcom = 1;
+ printk("disable wakeup1 %s %d\n",__FUNCTION__,__LINE__);
+
+
+ if(gpio_viatel_4wire[GPIO_VIATEL_USB_MDM_WAKE_AP]==149){
+ wakeup_reg_val=0x1L;
+ wakeup_shift_bit = 0;
+ wakeup_type_mask = 0xfffffff8;
+ }else{
+ wakeup_reg_val=0x4L;
+ wakeup_shift_bit = 8;
+ wakeup_type_mask = 0xfffff8ff;
+ }
+
+
+ PMCIE_VAL &=(~wakeup_reg_val);
+ INT_TRG_EN_VAL&=(~wakeup_reg_val);
+
+ if(wmt_getsyspara("wmt.modem.viatel.aprdy",buf,&varlen) == 0){
+ sscanf(buf,"%d",&ap_ready_always);
+ }else{
+ ap_ready_always = 0;/* default value */
+ }
+
+ return 0;
+ }
+
+ return -1;
+}
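+
+/*
+ * The wmt.modem.viatel.4wire bootloader variable parsed above is four
+ * colon-separated GPIO numbers in AP_RDY:MDM_RDY:AP_WAKE_MDM:MDM_WAKE_AP
+ * order; e.g. a (hypothetical) "4:5:6:149" would route MDM_WAKE_AP to
+ * the wakeup0-capable pin special-cased in oem_gpio_is_wakeup() below.
+ */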
+
+int oem_gpio_request(int gpio, const char *label)
+{
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ gpio_request(gpio_viatel_4wire[gpio], label);
+ return 0;
+}
+
+void oem_gpio_free(int gpio)
+{
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ gpio_free(gpio_viatel_4wire[gpio]);
+ return ;
+}
+
+/*config the gpio to be input for irq if the SOC need*/
+int oem_gpio_direction_input_for_irq(int gpio)
+{
+ if(!isviatelcom)
+ return 0;
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+
+ wmt_gpio_setpull(gpio_viatel_4wire[gpio], WMT_GPIO_PULL_DOWN);
+ gpio_direction_input(gpio_viatel_4wire[gpio]);
+ gpio_re_enabled(gpio_viatel_4wire[gpio]);
+
+ return 0;
+}
+
+int oem_gpio_direction_output(int gpio, int value)
+{
+ if(!isviatelcom)
+ return 0;
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ if(ap_ready_always&&(gpio == GPIO_VIATEL_USB_AP_RDY))
+ return 0;
+
+
+ gpio_re_enabled(gpio_viatel_4wire[gpio]);
+ gpio_direction_output(gpio_viatel_4wire[gpio], value);
+
+ return 0;
+}
+
+int oem_gpio_output(int gpio, int value)
+{
+ if(!isviatelcom)
+ return 0;
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+
+ gpio_re_enabled(gpio_viatel_4wire[gpio]);
+ gpio_direction_output(gpio_viatel_4wire[gpio], value);
+
+ return 0;
+}
+
+int oem_gpio_is_wakeup(int gpio)
+{
+	if (gpio == 149 || gpio == 152) // wakeup0 gpio index WMT_PIN_GP62_WAKEUP0
+ return 1;
+ else
+ return 0;
+}
+
+
+/*
+ * Get the output level if the gpio is output type;
+ * Get the input level if the gpio is input type
+ */
+int oem_gpio_get_value(int gpio)
+{
+ int rtn;
+ if(!isviatelcom)
+ return 0;
+ wmt_gpio_setpull(gpio_viatel_4wire[gpio], WMT_GPIO_PULL_DOWN);
+ gpio_direction_input(gpio_viatel_4wire[gpio]);
+ gpio_re_enabled(gpio_viatel_4wire[gpio]);
+ rtn = __gpio_get_value(gpio_viatel_4wire[gpio]);
+ wmt_dbg("%s index=%d gpio=%d,rtn=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio],rtn);
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio])&&(!wk1_high_active))
+ return !rtn;
+ else
+ return rtn;
+}
+
+
+
+
+int oem_gpio_to_irq(int gpio)
+{
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ return IRQ_GPIO;
+}
+
+/*
+ * Set the irq type of the pin.
+ * Get the pin level and set the correct edge if the type is both edge and
+ * the SOC do not support both edge detection at one time
+ */
+int oem_gpio_set_irq_type(int gpio, unsigned int type)
+{
+ if(!isviatelcom)
+ return 0;
+
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio])){
+ int wakeup_type;
+ if(type==IRQF_TRIGGER_RISING){
+ if(wk1_high_active)
+ wakeup_type=(0x3<<wakeup_shift_bit);
+ else
+ wakeup_type=(0x2<<wakeup_shift_bit);
+ }
+ else if(type==(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
+ wakeup_type=(0x4<<wakeup_shift_bit);
+ else{
+			printk("%s: unsupported irq type 0x%x\n", __FUNCTION__, type);
+ return 0;
+ }
+
+ INT_TYPE0_VAL = (INT_TYPE0_VAL&wakeup_type_mask)|wakeup_type;
+ return 0;
+ }
+
+	if (type >= IRQF_TRIGGER_HIGH)	/* type is unsigned, so < NONE can't happen */
+		printk("error %s %d gpio %d type 0x%x\n", __FUNCTION__, __LINE__,
+			gpio_viatel_4wire[gpio], type);
+ return wmt_gpio_set_irq_type(gpio_viatel_4wire[gpio], type);
+}
+
+
+int oem_gpio_request_irq(int gpio, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ if(!isviatelcom)
+ return 0;
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio]))
+ return 0;
+
+ return request_irq(oem_gpio_to_irq(gpio), handler, flags, name, dev);
+}
+
+void oem_gpio_irq_mask(int gpio)
+{
+ if(!isviatelcom)
+ return ;
+
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio])){
+ PMCIE_VAL &=(~wakeup_reg_val);
+ INT_TRG_EN_VAL&=(~wakeup_reg_val);
+ return ;
+ }
+
+ wmt_gpio_mask_irq(gpio_viatel_4wire[gpio]);
+
+ return ;
+}
+void oem_gpio_irq_unmask(int gpio)
+{
+ if(!isviatelcom)
+ return;
+
+ wmt_dbg("%s index=%d gpio=%d\n",__FUNCTION__,gpio,gpio_viatel_4wire[gpio]);
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio])){
+ int i;
+ for(i=0;i<100;i++){
+ PMCIE_VAL |=wakeup_reg_val;
+ INT_TRG_EN_VAL|=wakeup_reg_val;
+ if(PMCIE_VAL&wakeup_reg_val){
+ break;
+ }
+ printk("PMCIE_VAL set error %x,retry %d\n", PMCIE_VAL,i);
+ mdelay(10);
+ }
+ return ;
+ }
+
+ wmt_gpio_unmask_irq(gpio_viatel_4wire[gpio]);
+
+ return ;
+}
+
+int oem_gpio_irq_isenable(int gpio){
+ if(!isviatelcom)
+ return 0;
+
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio]))
+ return 1;
+
+ return is_gpio_irqenable(gpio_viatel_4wire[gpio]);
+}
+
+int oem_gpio_irq_isint(int gpio){
+ if(!isviatelcom)
+ return 0;
+
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio]))
+ return 1;
+
+ return gpio_irqstatus(gpio_viatel_4wire[gpio]);
+
+}
+int oem_gpio_irq_clear(int gpio){
+ if(!isviatelcom)
+ return 1;
+
+ if(oem_gpio_is_wakeup(gpio_viatel_4wire[gpio]))
+ return 0;
+
+ wmt_gpio_ack_irq(gpio_viatel_4wire[gpio]);
+
+ return 0;
+}
+
+#endif
+#if 0
+
+#if defined(CONFIG_MACH_OMAP_KUNLUN)
+int oem_gpio_request(int gpio, const char *label)
+{
+ return gpio_request(gpio, label);
+}
+
+void oem_gpio_free(int gpio)
+{
+ gpio_free(gpio);
+}
+
+/* config the gpio to be input for irq if the SOC needs it */
+int oem_gpio_direction_input_for_irq(int gpio)
+{
+ return gpio_direction_input(gpio);
+}
+
+int oem_gpio_direction_output(int gpio, int value)
+{
+ return gpio_direction_output(gpio, value);
+}
+
+/*
+ * Get the output level if the gpio is output type;
+ * Get the input level if the gpio is input type
+ */
+int oem_gpio_get_value(int gpio)
+{
+ return gpio_get_value(gpio);
+}
+
+int oem_gpio_to_irq(int gpio)
+{
+ return gpio_to_irq(gpio);
+}
+
+/*
+ * Set the irq type of the pin.
+ * Get the pin level and set the correct edge if the type is both-edge and
+ * the SOC does not support both-edge detection at one time
+ */
+int oem_gpio_set_irq_type(unsigned gpio, unsigned int type)
+{
+ return set_irq_type(oem_gpio_to_irq(gpio), type);
+}
+
+
+int oem_gpio_request_irq(int gpio, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ return request_irq(oem_gpio_to_irq(gpio), handler, flags, name, dev);
+}
+
+void oem_gpio_irq_mask(int gpio)
+{
+ return ;
+}
+
+void oem_gpio_irq_unmask(int gpio)
+{
+ return ;
+}
+
+
+
+#endif
+
+#if defined(CONFIG_SOC_JZ4770)
+int oem_gpio_request(int gpio, const char *label)
+{
+ return gpio_request(gpio, label);
+}
+
+void oem_gpio_free(int gpio)
+{
+ gpio_free(gpio);
+}
+
+/* config the gpio to be input for irq if the SOC needs it */
+int oem_gpio_direction_input_for_irq(int gpio)
+{
+ return 0;
+}
+
+int oem_gpio_direction_output(int gpio, int value)
+{
+ return gpio_direction_output(gpio, value);
+}
+
+/*
+ * Get the output level if the gpio is output type;
+ * Get the input level if the gpio is input type
+ */
+int oem_gpio_get_value(int gpio)
+{
+ return gpio_get_value(gpio);
+}
+
+int oem_gpio_to_irq(int gpio)
+{
+ return gpio_to_irq(gpio);
+}
+
+#define GPIO_DEBOUNCE (3)
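+/*
+ * Software debounce: keep sampling the pin until it returns the same value
+ * GPIO_DEBOUNCE times in a row (the countdown restarts on every change),
+ * with a 100ns pause between samples.
+ */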
+int read_gpio_pin(int pin)
+{
+ int t, v;
+ int i;
+
+ i = GPIO_DEBOUNCE;
+
+ v = t = 0;
+
+ while (i--) {
+ t = __gpio_get_pin(pin);
+ if (v != t)
+ i = GPIO_DEBOUNCE;
+
+ v = t;
+ ndelay(100);
+ }
+
+ return v;
+}
+
+int oem_gpio_set_irq_type(int gpio, unsigned int type)
+{
+ if(type == IRQ_TYPE_EDGE_BOTH){
+ if(read_gpio_pin(gpio)){
+ type = IRQ_TYPE_EDGE_FALLING;
+ }else{
+ type = IRQ_TYPE_EDGE_RISING;
+ }
+ }
+
+ if(type == IRQ_TYPE_LEVEL_MASK){
+ if(read_gpio_pin(gpio)){
+ type = IRQ_TYPE_LEVEL_LOW;
+ }else{
+ type = IRQ_TYPE_LEVEL_HIGH;
+ }
+ }
+
+ switch(type){
+ case IRQ_TYPE_EDGE_RISING:
+ __gpio_as_irq_rise_edge(gpio);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ __gpio_as_irq_fall_edge(gpio);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ __gpio_as_irq_high_level(gpio);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ __gpio_as_irq_low_level(gpio);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int oem_gpio_request_irq(int gpio, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ int ret = request_irq(oem_gpio_to_irq(gpio), handler, flags, name, dev);
+ enable_irq_wake(oem_gpio_to_irq(gpio));
+ return ret;
+}
+
+void oem_gpio_irq_mask(int gpio)
+{
+ return ;
+}
+
+void oem_gpio_irq_unmask(int gpio)
+{
+ return ;
+}
+#endif
+
+#if defined(EVDO_DT_SUPPORT)
+#include <linux/interrupt.h>
+int oem_gpio_request(int gpio, const char *label)
+{
+ return 0;
+}
+
+void oem_gpio_free(int gpio)
+{
+ return ;
+}
+
+/* config the gpio to be input for irq if the SOC needs it */
+int oem_gpio_direction_input_for_irq(int gpio)
+{
+ mt_set_gpio_mode(gpio, GPIO_MODE_02);
+ mt_set_gpio_dir(gpio, GPIO_DIR_IN);
+ return 0;
+}
+
+int oem_gpio_direction_output(int gpio, int value)
+{
+ mt_set_gpio_mode(gpio, GPIO_MODE_GPIO);
+ mt_set_gpio_dir(gpio, GPIO_DIR_OUT);
+ mt_set_gpio_out(gpio, !!value);
+ return 0;
+}
+
+int oem_gpio_get_value(int gpio)
+{
+ if(GPIO_DIR_IN == mt_get_gpio_dir(gpio)){
+ return mt_get_gpio_in(gpio);
+ }else{
+ return mt_get_gpio_out(gpio);
+ }
+}
+
+typedef struct mtk_oem_gpio_des{
+ int gpio;
+ int irq;
+ void (*redirect)(void);
+ irq_handler_t handle;
+ void *data;
+}mtk_oem_gpio_des;
+
+static void gpio_irq_handle_usb_mdm_rdy(void);
+static void gpio_irq_handle_usb_mdm_wake_ap(void);
+static void gpio_irq_handle_uart_mdm_wake_ap(void);
+static void gpio_irq_handle_rst_ind(void);
+static void gpio_irq_handle_pwr_ind(void);
+
+mtk_oem_gpio_des oem_gpio_list[] = {
+ {GPIO_VIATEL_USB_MDM_RDY, 4, gpio_irq_handle_usb_mdm_rdy, NULL, NULL},
+ {GPIO_VIATEL_USB_MDM_WAKE_AP, 29, gpio_irq_handle_usb_mdm_wake_ap, NULL, NULL},
+ {GPIO_VIATEL_MDM_RST_IND, 28, gpio_irq_handle_rst_ind, NULL, NULL},
+ {GPIO_VIATEL_UART_MDM_WAKE_AP, 27, gpio_irq_handle_uart_mdm_wake_ap, NULL, NULL},
+ {GPIO_VIATEL_MDM_PWR_IND, 27, gpio_irq_handle_pwr_ind, NULL, NULL},
+};
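+/*
+ * Note: GPIO_VIATEL_UART_MDM_WAKE_AP and GPIO_VIATEL_MDM_PWR_IND are both
+ * listed with irq 27 above, so gpio_des_find_by_irq() below can only ever
+ * match the first of the two entries.
+ */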
+
+static mtk_oem_gpio_des* gpio_des_find_by_gpio(int gpio)
+{
+ int i = 0;
+ mtk_oem_gpio_des *des = NULL;
+
+ if(gpio < 0){
+ return NULL;
+ }
+
+ for(i=0; i < sizeof(oem_gpio_list) / sizeof(mtk_oem_gpio_des); i++){
+ des = oem_gpio_list + i;
+ if(des->gpio == gpio){
+ return des;
+ }
+ }
+
+ return NULL;
+}
+
+static mtk_oem_gpio_des* gpio_des_find_by_irq(int irq)
+{
+ int i = 0;
+ mtk_oem_gpio_des *des = NULL;
+
+ for(i=0; i < sizeof(oem_gpio_list) / sizeof(mtk_oem_gpio_des); i++){
+ des = oem_gpio_list + i;
+ if(des->irq == irq){
+ return des;
+ }
+ }
+
+ return NULL;
+}
+static void gpio_irq_handle_usb_mdm_rdy(void)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(GPIO_VIATEL_USB_MDM_RDY);
+ if(des && des->handle){
+ des->handle(des->irq, des->data);
+ }
+}
+static void gpio_irq_handle_usb_mdm_wake_ap(void)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(GPIO_VIATEL_USB_MDM_WAKE_AP);
+ if(des && des->handle){
+ des->handle(des->irq, des->data);
+ }
+}
+
+static void gpio_irq_handle_uart_mdm_wake_ap(void)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(GPIO_VIATEL_UART_MDM_WAKE_AP);
+ if(des && des->handle){
+ des->handle(des->irq, des->data);
+ }
+}
+
+static void gpio_irq_handle_rst_ind(void)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(GPIO_VIATEL_MDM_RST_IND);
+ if(des && des->handle){
+ des->handle(des->irq, des->data);
+ }
+}
+static void gpio_irq_handle_pwr_ind(void)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(GPIO_VIATEL_MDM_PWR_IND);
+ if(des && des->handle){
+ des->handle(des->irq, des->data);
+ }
+}
+
+int oem_gpio_to_irq(int gpio)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(gpio);
+ if(NULL == des){
+ printk("%s: no irq for gpio %d\n", __FUNCTION__, gpio);
+ return -1;
+ }else{
+ return des->irq;
+ }
+}
+
+int oem_irq_to_gpio(int irq)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_irq(irq);
+ if(NULL == des){
+ printk("%s: no gpio for irq %d\n", __FUNCTION__, irq);
+ return -1;
+ }else{
+ return des->gpio;
+ }
+}
+
+int oem_gpio_set_irq_type(int gpio, unsigned int type)
+{
+ int irq, level;
+
+ irq = oem_gpio_to_irq(gpio);
+ if(irq < 0){
+ return irq;
+ }
+
+ level = oem_gpio_get_value(gpio);
+
+ if(type == IRQ_TYPE_EDGE_BOTH){
+ if(level){
+ type = IRQ_TYPE_EDGE_FALLING;
+ }else{
+ type = IRQ_TYPE_EDGE_RISING;
+ }
+ }
+
+ if(type == IRQ_TYPE_LEVEL_MASK){
+ if(level){
+ type = IRQ_TYPE_LEVEL_LOW;
+ }else{
+ type = IRQ_TYPE_LEVEL_HIGH;
+ }
+ }
+
+ mt65xx_eint_set_hw_debounce(irq, 3);
+ switch(type){
+ case IRQ_TYPE_EDGE_RISING:
+ mt65xx_eint_set_sens(irq, MT65xx_EDGE_SENSITIVE);
+ mt65xx_eint_set_polarity(irq, MT65xx_POLARITY_HIGH);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ mt65xx_eint_set_sens(irq, MT65xx_EDGE_SENSITIVE);
+ mt65xx_eint_set_polarity(irq, MT65xx_POLARITY_LOW);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ mt65xx_eint_set_sens(irq, MT65xx_LEVEL_SENSITIVE);
+ mt65xx_eint_set_polarity(irq, MT65xx_POLARITY_HIGH);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ mt65xx_eint_set_sens(irq, MT65xx_LEVEL_SENSITIVE);
+ mt65xx_eint_set_polarity(irq, MT65xx_POLARITY_LOW);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int oem_gpio_request_irq(int gpio, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ mtk_oem_gpio_des *des = NULL;
+
+ des = gpio_des_find_by_gpio(gpio);
+ if(des == NULL){
+ return -1;
+ }
+ des->data = dev;
+ des->handle = handler;
+
+ mt65xx_eint_registration(des->irq, 1, 1, des->redirect, 0);
+
+ return 0;
+}
+
+void oem_gpio_irq_mask(int gpio)
+{
+ int irq;
+
+ irq = oem_gpio_to_irq(gpio);
+ if(irq < 0){
+ return ;
+ }
+
+ mt65xx_eint_mask(irq);
+}
+
+void oem_gpio_irq_unmask(int gpio)
+{
+ int irq;
+
+ irq = oem_gpio_to_irq(gpio);
+ if(irq < 0){
+ return ;
+ }
+
+ mt65xx_eint_unmask(irq);
+}
+#endif
+#endif
diff --git a/drivers/misc/viatel/power.c b/drivers/misc/viatel/power.c
new file mode 100755
index 00000000..0fc22cb3
--- /dev/null
+++ b/drivers/misc/viatel/power.c
@@ -0,0 +1,340 @@
+/*
+ * viatel_cbp_power.c
+ *
+ * VIA CBP driver for Linux
+ *
+ * Copyright (C) 2009 VIA TELECOM Corporation, Inc.
+ * Author: VIA TELECOM Corporation, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <mach/viatel.h>
+#include "core.h"
+
+#define MDM_RST_LOCK_TIME (120)
+#define MDM_RST_HOLD_DELAY (100) //ms
+#define MDM_PWR_HOLD_DELAY (500) //ms
+
+//#define VIA_AP_MODEM_DEBUG
+#ifdef VIA_AP_MODEM_DEBUG
+#undef dbg
+#define dbg(format, arg...) printk("[CBP_POWER]: " format "\n" , ## arg)
+#else
+#undef dbg
+#define dbg(format, arg...) do {} while (0)
+#endif
+
+static struct wake_lock reset_lock;
+
+void oem_power_off_modem(void)
+{
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_PWR_EN, 0);
+ }
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 1);
+ /*just hold the reset pin if no power enable pin*/
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ mdelay(MDM_RST_HOLD_DELAY);
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 0);
+ }
+ }
+}
+EXPORT_SYMBOL(oem_power_off_modem);
+
+ssize_t modem_power_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int power = 0;
+ int ret = 0;
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_IND)){
+ power = !!oem_gpio_get_value(GPIO_VIATEL_MDM_PWR_IND);
+ }else if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ printk("No MDM_PWR_IND, just detect MDM_PWR_EN\n");
+ power = !!oem_gpio_get_value(GPIO_VIATEL_MDM_PWR_EN);
+ }else if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+		printk("No MDM_PWR_IND, just detect MDM_RST\n");
+ power = !!oem_gpio_get_value(GPIO_VIATEL_MDM_RST);
+ }
+ if(power){
+ ret += sprintf(buf + ret, "on\n");
+ }else{
+ ret += sprintf(buf + ret, "off\n");
+ }
+
+ return ret;
+}
+
+ssize_t modem_power_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int power;
+
+ /* power the modem */
+ if ( !strncmp(buf, "on", strlen("on"))) {
+ power = 1;
+ }else if(!strncmp(buf, "off", strlen("off"))){
+ power = 0;
+ }else{
+ printk("%s: input %s is invalid.\n", __FUNCTION__, buf);
+ return n;
+ }
+
+	printk("Warning: power %s modem\n", power ? "on" : "off");
+ if(power){
+#if 0
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 1);
+ mdelay(MDM_RST_HOLD_DELAY);
+ }
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_PWR_EN, 0);
+ mdelay(MDM_PWR_HOLD_DELAY);
+ }
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_PWR_EN, 1);
+ mdelay(MDM_PWR_HOLD_DELAY);
+ }
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 0);
+ mdelay(MDM_RST_HOLD_DELAY);
+ }
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_PWR_EN, 0);
+ }
+#endif
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_EN)){
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 0);
+ mdelay(MDM_RST_HOLD_DELAY);
+ }
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_PWR_EN, 1);
+ mdelay(MDM_PWR_HOLD_DELAY);
+ }
+ }else{
+ oem_power_off_modem();
+ }
+ return n;
+}
+
+ssize_t modem_reset_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int reset = 0;
+ int ret = 0;
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST_IND)){
+ reset = !!oem_gpio_get_value(GPIO_VIATEL_MDM_RST_IND);
+ }else if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+ reset = !!oem_gpio_get_value(GPIO_VIATEL_MDM_RST);
+ }
+
+ if(reset){
+ ret += sprintf(buf + ret, "reset\n");
+ }else{
+ ret += sprintf(buf + ret, "work\n");
+ }
+
+ return ret;
+}
+
+ssize_t modem_reset_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ /* reset the modem */
+ if ( !strncmp(buf, "reset", strlen("reset"))) {
+ wake_lock_timeout(&reset_lock, MDM_RST_LOCK_TIME * HZ);
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST)){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 1);
+ mdelay(MDM_RST_HOLD_DELAY);
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 0);
+ mdelay(MDM_RST_HOLD_DELAY);
+ }
+		printk("Warning: reset modem\n");
+ }
+
+ return n;
+}
+
+ssize_t modem_ets_select_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int level = 0;
+ int ret = 0;
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_ETS_SEL)){
+ level = !!oem_gpio_get_value(GPIO_VIATEL_MDM_ETS_SEL);
+ }
+
+ ret += sprintf(buf, "%d\n", level);
+ return ret;
+}
+
+ssize_t modem_ets_select_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_ETS_SEL)){
+ if ( !strncmp(buf, "1", strlen("1"))) {
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_ETS_SEL, 1);
+ }else if( !strncmp(buf, "0", strlen("0"))){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_ETS_SEL, 0);
+ }else{
+			dbg("Unknown command.\n");
+ }
+ }
+
+ return n;
+}
+
+ssize_t modem_boot_select_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int ret = 0;
+ int level = 0;
+
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_BOOT_SEL)){
+ level = !!oem_gpio_get_value(GPIO_VIATEL_MDM_BOOT_SEL);
+ }
+
+ ret += sprintf(buf, "%d\n", level);
+ return ret;
+}
+
+ssize_t modem_boot_select_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_BOOT_SEL)){
+ if ( !strncmp(buf, "1", strlen("1"))) {
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_BOOT_SEL, 1);
+ }else if( !strncmp(buf, "0", strlen("0"))){
+ oem_gpio_direction_output(GPIO_VIATEL_MDM_BOOT_SEL, 0);
+ }else{
+			dbg("Unknown command.\n");
+ }
+ }
+
+ return n;
+}
+
+#define modem_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0644, \
+ }, \
+ .show = modem_##_name##_show, \
+ .store = modem_##_name##_store, \
+}
+
+modem_attr(power);
+modem_attr(reset);
+modem_attr(ets_select);
+modem_attr(boot_select);
+
+static struct attribute *g_attr[] = {
+ &power_attr.attr,
+ &reset_attr.attr,
+ &ets_select_attr.attr,
+ &boot_select_attr.attr,
+ NULL
+};
+
+static struct attribute_group g_attr_group = {
+ .attrs = g_attr,
+};
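+/*
+ * Usage sketch: once viatel_power_init() below registers this group on the
+ * kobject returned by viatel_kobject_add("modem") (the exact sysfs parent
+ * directory depends on that helper), userspace can drive the modem with e.g.:
+ *
+ *	echo on    > <sysfs parent>/modem/power
+ *	echo reset > <sysfs parent>/modem/reset
+ *	cat <sysfs parent>/modem/power        # -> "on" or "off"
+ */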
+
+
+static void modem_shutdown(struct platform_device *dev)
+{
+ oem_power_off_modem();
+}
+
+static struct platform_driver power_driver = {
+ .driver.name = "modem_power",
+ .shutdown = modem_shutdown,
+};
+
+static struct platform_device power_device = {
+ .name = "modem_power",
+};
+
+static struct kobject *modem_kobj;
+static int __init viatel_power_init(void)
+{
+ int ret = 0;
+
+ modem_kobj = viatel_kobject_add("modem");
+ if(!modem_kobj){
+ ret = -ENOMEM;
+ goto err_create_kobj;
+ }
+
+ ret = platform_device_register(&power_device);
+ if (ret) {
+ printk("platform_device_register failed\n");
+ goto err_platform_device_register;
+ }
+ ret = platform_driver_register(&power_driver);
+ if (ret) {
+ printk("platform_driver_register failed\n");
+ goto err_platform_driver_register;
+ }
+
+ wake_lock_init(&reset_lock, WAKE_LOCK_SUSPEND, "cbp_rst");
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_PWR_IND)){
+ oem_gpio_direction_input_for_irq(GPIO_VIATEL_MDM_PWR_IND);
+ }
+ if(GPIO_OEM_VALID(GPIO_VIATEL_MDM_RST_IND)){
+ oem_gpio_direction_input_for_irq(GPIO_VIATEL_MDM_RST_IND);
+ }
+
+ //oem_gpio_direction_output(GPIO_VIATEL_MDM_RST, 0);
+ //oem_gpio_direction_output(GPIO_VIATEL_MDM_PWR_EN, 1);
+ ret = sysfs_create_group(modem_kobj, &g_attr_group);
+
+ if(ret){
+ printk("sysfs_create_group failed\n");
+ goto err_sysfs_create_group;
+ }
+
+ return 0;
+err_sysfs_create_group:
+ platform_driver_unregister(&power_driver);
+err_platform_driver_register:
+ platform_device_unregister(&power_device);
+err_platform_device_register:
+err_create_kobj:
+ return ret;
+}
+
+static void __exit viatel_power_exit(void)
+{
+ wake_lock_destroy(&reset_lock);
+}
+
+late_initcall_sync(viatel_power_init);
+module_exit(viatel_power_exit);
diff --git a/drivers/misc/viatel/sync.c b/drivers/misc/viatel/sync.c
new file mode 100755
index 00000000..08ddf8ba
--- /dev/null
+++ b/drivers/misc/viatel/sync.c
@@ -0,0 +1,2445 @@
+/*
+ * viatel_cbp_sync.c
+ *
+ * VIA CBP driver for Linux
+ *
+ * Copyright (C) 2011 VIA TELECOM Corporation, Inc.
+ * Author: VIA TELECOM Corporation, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <mach/viatel.h>
+#include "core.h"
+
+/* defined in oem.c */
+extern int isviatelcom;
+extern int ap_ready_always;
+
+static int asc_debug = 0;
+#define ASCDPRT(fmt, arg...) do{ \
+ if(asc_debug) \
+ printk("[ASC] " fmt, ##arg); \
+ }while(0)
+#define ASCPRT(fmt, arg...) printk("[ASC] " fmt, ##arg)
+
+/* times below are in milliseconds */
+#define ASC_RX_WAIT_IDLE_TIME (1000)
+#define ASC_TX_WAIT_READY_TIME (1000)
+#define ASC_TX_WAIT_IDLE_TIME (2000)
+#define ASC_TX_AUTO_DELAY_TIME (2000)
+#define ASC_TX_WAIT_SLEEP_TIME (500)
+#define ASC_TX_TRY_TIMES (3)
+#define ASC_TX_DEBOUNCE_TIME (10)
+
+#define ASC_TX_SYSFS_USER "AscApp"
+#define ASC_TX_AUTO_USER "AscAuto"
+
+/* these lists contain all registered asc tx/rx handles */
+static DEFINE_SPINLOCK(hdlock);
+static LIST_HEAD(asc_tx_handle_list);
+static LIST_HEAD(asc_rx_handle_list);
+static struct workqueue_struct *asc_work_queue;
+static struct kobject *asc_kobj;
+
+typedef enum{
+ ASC_TX_HD = 0,
+ ASC_RX_HD
+}asc_handle_type;
+
+#define ASC_EVENT_POOL_MAX (60)
+typedef enum{
+ ASC_EVENT_UNUSE = 0,
+ ASC_EVENT_STATIC,
+ ASC_EVENT_DYNAMIC
+}asc_event_type;
+
+struct asc_event{
+ int id;
+ struct list_head list;
+ char usage;
+};
+
+static struct asc_event event_pool[ASC_EVENT_POOL_MAX];
+
+struct asc_user{
+ struct asc_infor infor;
+ atomic_t count;
+ struct list_head node;
+};
+
+struct asc_state_dsp{
+ char name[ASC_NAME_LEN];
+ /*state callback handle for events*/
+ int (*handle)(void * hd, int event);
+};
+
+/* TX STATUS and TX EVENT*/
+typedef enum{
+ AP_TX_EVENT_REQUEST = 0, /*internal*/
+ AP_TX_EVENT_CP_READY,
+ AP_TX_EVENT_CP_UNREADY,
+ AP_TX_EVENT_WAIT_TIMEOUT,
+ AP_TX_EVENT_IDLE_TIMEOUT,
+ AP_TX_EVENT_STOP,
+ AP_TX_EVENT_RESET,
+ AP_TX_EVENT_NUM
+} ap_tx_event;
+
+typedef enum{
+ AP_TX_ST_SLEEP = 0,
+ AP_TX_ST_WAIT_READY,
+	AP_TX_ST_READY, /* wait for all Tx channels to finish */
+ AP_TX_ST_IDLE,
+ AP_TX_ST_NUM
+} ap_tx_state;
+
+struct asc_tx_handle{
+ struct asc_config cfg;
+ atomic_t state;
+ atomic_t count;
+ struct list_head user_list;
+ struct asc_state_dsp *table;
+ /*process the event to switch different states*/
+ struct task_struct *thread;
+ int ntf;
+ int wait_try;
+ int auto_delay;
+ spinlock_t slock;
+ wait_queue_head_t wait;
+ wait_queue_head_t wait_tx_state;
+ struct mutex mlock;
+ struct wake_lock wlock;
+ struct timer_list timer_wait_ready;
+ struct timer_list timer_wait_idle;
+ struct timer_list timer_wait_sleep;
+ struct work_struct ntf_work;
+ struct list_head event_q;
+ struct list_head node;
+ struct kobject *kobj;
+};
+
+static int asc_tx_handle_sleep(void *, int );
+static int asc_tx_handle_wait_ready(void *, int );
+static int asc_tx_handle_ready(void *, int );
+static int asc_tx_handle_idle(void *, int );
+
+/*the table used to describe all tx states*/
+static struct asc_state_dsp asc_tx_table[AP_TX_ST_NUM] = {
+ [AP_TX_ST_SLEEP] = {
+ .name = "AP_TX_ST_SLEEP",
+ .handle = asc_tx_handle_sleep,
+ },
+ [AP_TX_ST_WAIT_READY] = {
+ .name = "AP_TX_ST_WAIT_READY",
+ .handle = asc_tx_handle_wait_ready,
+ },
+ [AP_TX_ST_READY] = {
+ .name = "AP_TX_ST_READY",
+ .handle = asc_tx_handle_ready,
+ },
+ [AP_TX_ST_IDLE] = {
+ .name = "AP_TX_ST_IDLE",
+ .handle = asc_tx_handle_idle,
+ },
+};
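+/*
+ * Tx state machine, as implemented by the asc_tx_handle_* handlers (a sketch):
+ *
+ *   SLEEP --REQUEST--> WAIT_READY --CP_READY--> READY --STOP--> SLEEP
+ *   WAIT_READY --WAIT_TIMEOUT--> SLEEP (REQUEST retried up to ASC_TX_TRY_TIMES)
+ *   WAIT_READY --STOP--> SLEEP
+ *   IDLE --IDLE_TIMEOUT--> SLEEP,  IDLE --REQUEST--> SLEEP (+ resent REQUEST)
+ */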
+
+/* RX STATUS and RX EVENT*/
+typedef enum{
+ AP_RX_EVENT_REQUEST = 0,
+ AP_RX_EVENT_AP_READY,
+ AP_RX_EVENT_AP_UNREADY,
+ AP_RX_EVENT_STOP,
+ AP_RX_EVENT_IDLE_TIMEOUT,
+ AP_RX_EVENT_RESET,
+ AP_RX_EVENT_NUM
+} ap_rx_event;
+
+typedef enum{
+ AP_RX_ST_SLEEP = 0,
+ AP_RX_ST_WAIT_READY,
+ AP_RX_ST_READY,
+ AP_RX_ST_IDLE,
+ AP_RX_ST_NUM
+} ap_rx_state;
+
+struct asc_rx_handle{
+ struct asc_config cfg;
+ atomic_t state;
+ struct list_head user_list;
+ struct asc_state_dsp *table;
+ int ntf;
+ /*process the event to switch different states*/
+ struct task_struct *thread;
+ spinlock_t slock;
+ wait_queue_head_t wait;
+ struct mutex mlock;
+ struct wake_lock wlock;
+ struct timer_list timer;
+ struct list_head event_q;
+ struct list_head node;
+ struct work_struct ntf_work;
+};
+
+static struct asc_config tx_cfg,rx_cfg;
+static int asc_rx_handle_sleep(void *, int );
+static int asc_rx_handle_wait_ready(void *, int );
+static int asc_rx_handle_ready(void *, int );
+static int asc_rx_handle_idle(void *, int );
+
+/*the table used to describe all rx states*/
+static struct asc_state_dsp asc_rx_table[AP_RX_ST_NUM] = {
+ [AP_RX_ST_SLEEP] = {
+ .name = "AP_RX_ST_SLEEP",
+ .handle = asc_rx_handle_sleep,
+ },
+ [AP_RX_ST_WAIT_READY] = {
+ .name = "AP_RX_ST_WAIT_READY",
+ .handle = asc_rx_handle_wait_ready,
+ },
+ [AP_RX_ST_READY] = {
+ .name = "AP_RX_ST_READY",
+ .handle = asc_rx_handle_ready,
+ },
+ [AP_RX_ST_IDLE] = {
+ .name = "AP_RX_ST_IDLE",
+ .handle = asc_rx_handle_idle,
+ },
+};
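+/*
+ * Rx state machine, as implemented by the asc_rx_handle_* handlers (a sketch):
+ *
+ *   SLEEP --REQUEST--> WAIT_READY --AP_READY--> READY --STOP--> IDLE
+ *   WAIT_READY --AP_UNREADY/STOP--> SLEEP
+ *   IDLE --REQUEST--> READY,  IDLE --IDLE_TIMEOUT--> SLEEP
+ */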
+
+static int asc_tx_event_send(struct asc_tx_handle *tx, int id);
+static void asc_tx_handle_reset(struct asc_tx_handle *tx);
+static int asc_rx_event_send(struct asc_rx_handle *rx, int id);
+static void asc_rx_handle_reset(struct asc_rx_handle *rx);
+
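+/*
+ * Events are allocated from a small static pool first so allocation also
+ * works in atomic/irq context; when the pool is exhausted we fall back to
+ * kmalloc(GFP_ATOMIC) and tag the event ASC_EVENT_DYNAMIC so that
+ * asc_event_free() knows whether to kfree() it or just clear the pool slot.
+ */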
+static struct asc_event * asc_event_malloc(void)
+{
+ int i = 0;
+ unsigned long flags = 0;
+ struct asc_event *event = NULL;
+
+ spin_lock_irqsave(&hdlock, flags);
+	for(i = 0; i < ASC_EVENT_POOL_MAX; i++){
+		if(ASC_EVENT_UNUSE == event_pool[i].usage){
+			event = &(event_pool[i]);
+			event->usage = ASC_EVENT_STATIC;
+			break;
+		}
+	}
+
+ if(NULL == event){
+ event = kmalloc(sizeof(struct asc_event), GFP_ATOMIC);
+ if(event){
+ event->usage = ASC_EVENT_DYNAMIC;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+ return event;
+}
+
+static void asc_event_free(struct asc_event * event)
+{
+ unsigned long flags = 0;
+
+ if(!event)
+ return ;
+ spin_lock_irqsave(&hdlock, flags);
+ if(ASC_EVENT_STATIC == event->usage){
+ memset(event, 0, sizeof(struct asc_event));
+ }else{
+ kfree(event);
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+}
+static irqreturn_t asc_irq_cp_indicate_state(int irq, void *data)
+{
+ int level;
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+ struct asc_config *cfg = &tx->cfg;
+
+ if (!oem_gpio_irq_isenable(cfg->gpio_ready) ||
+ !oem_gpio_irq_isint(cfg->gpio_ready)){
+
+ return IRQ_NONE;
+
+ }
+ oem_gpio_irq_clear(cfg->gpio_ready);
+ level = !!oem_gpio_get_value(cfg->gpio_ready);
+ oem_gpio_set_irq_type(cfg->gpio_ready, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
+	ASCDPRT("Irq %s cp_indicate_ap %s.\n", cfg->name, (level == cfg->polar)?"AWAKE":"SLEEP");
+ if(level == cfg->polar){
+ asc_tx_event_send(tx, AP_TX_EVENT_CP_READY);
+ }else{
+ /*do not care*/
+ //asc_tx_event_send(tx, AP_TX_EVENT_CP_UNREADY);
+ }
+ oem_gpio_irq_unmask(cfg->gpio_ready);
+ return IRQ_HANDLED;
+}
+
+
+
+static irqreturn_t asc_irq_cp_wake_ap(int irq, void *data)
+{
+ int level;
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+ struct asc_config *cfg = &rx->cfg;
+
+	if(!cfg || cfg->gpio_wake < 0){
+ return IRQ_NONE;
+ }
+
+ if (!oem_gpio_irq_isenable(cfg->gpio_wake) ||
+ !oem_gpio_irq_isint(cfg->gpio_wake)){
+ return IRQ_NONE;
+ }
+
+ oem_gpio_irq_clear(cfg->gpio_wake);
+
+ level = !!oem_gpio_get_value(cfg->gpio_wake);
+ oem_gpio_set_irq_type(cfg->gpio_wake, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
+	ASCDPRT("Irq %s cp_wake_ap, request ap to be %s.\n", cfg->name, (level == cfg->polar)?"AWAKE":"SLEEP");
+
+ if(level == cfg->polar){
+ /*Cp requset Ap wake*/
+ wake_lock(&rx->wlock);
+ /*FIXME: jump to ready as soon as possible to avoid the AP_READY error indication to CBP */
+ if(AP_RX_ST_IDLE == atomic_read(&rx->state)){
+ ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", cfg->name, AP_RX_EVENT_REQUEST, rx->table[AP_RX_ST_IDLE].name);
+ asc_rx_handle_idle(rx, AP_RX_EVENT_REQUEST);
+ ASCDPRT("Rx(%s): go into state(%s).\n", cfg->name, rx->table[atomic_read(&rx->state)].name);
+ }else{
+ asc_rx_event_send(rx, AP_RX_EVENT_REQUEST);
+ }
+ }else{
+ /*Cp allow Ap sleep*/
+ asc_rx_event_send(rx, AP_RX_EVENT_STOP);
+ }
+
+ oem_gpio_irq_unmask(cfg->gpio_wake);
+ return IRQ_HANDLED;
+}
+
+
+
+
+static struct asc_tx_handle *asc_tx_handle_lookup(const char *name)
+{
+ unsigned long flags;
+ struct asc_tx_handle *hd, *tmp, *t;
+
+ if (!name)
+ return NULL;
+
+ hd = NULL;
+
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
+ if (!strncmp(name, tmp->cfg.name, ASC_NAME_LEN - 1)) {
+ hd = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+ return hd;
+}
+
+static struct asc_rx_handle *asc_rx_handle_lookup(const char *name)
+{
+ unsigned long flags;
+ struct asc_rx_handle *hd, *tmp, *t;
+
+ if (!name)
+ return NULL;
+
+ hd = NULL;
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_rx_handle_list, node) {
+ if (!strncmp(name, tmp->cfg.name, ASC_NAME_LEN - 1)) {
+ hd = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+ return hd;
+}
+
+irqreturn_t viatelcom_irq_cp_wake_ap(int irq, void *data){
+ struct asc_rx_handle *rx;
+ if(!isviatelcom||ap_ready_always)
+ return IRQ_HANDLED;
+ rx = asc_rx_handle_lookup(USB_RX_HD_NAME);
+ return asc_irq_cp_wake_ap(irq,rx);
+}
+
+static struct asc_user *asc_tx_user_lookup(struct asc_tx_handle *tx, const char *name)
+{
+ unsigned long flags = 0;
+ struct asc_user *user = NULL, *tmp = NULL, *t = NULL;
+
+ if (!name)
+ return NULL;
+
+ spin_lock_irqsave(&tx->slock, flags);
+ list_for_each_entry_safe(tmp, t, &tx->user_list, node) {
+ if (!strncmp(name, tmp->infor.name, ASC_NAME_LEN - 1)) {
+ user = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tx->slock, flags);
+
+ return user;
+}
+
+static struct asc_user *asc_rx_user_lookup(struct asc_rx_handle *rx, const char *name)
+{
+ unsigned long flags = 0;
+ struct asc_user *user = NULL, *tmp = NULL, *t = NULL;
+
+ if (!name)
+ return NULL;
+
+ spin_lock_irqsave(&rx->slock, flags);
+ list_for_each_entry_safe(tmp, t, &rx->user_list, node) {
+ if (!strncmp(name, tmp->infor.name, ASC_NAME_LEN - 1)) {
+ user = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&rx->slock, flags);
+
+ return user;
+}
+
+static inline void asc_rx_indicate_wake(struct asc_rx_handle *rx)
+{
+ if(rx->cfg.gpio_ready >= 0)
+ oem_gpio_direction_output(rx->cfg.gpio_ready, rx->cfg.polar);
+}
+
+static inline void asc_rx_indicate_sleep(struct asc_rx_handle *rx)
+{
+ if(rx->cfg.gpio_ready >= 0)
+ oem_gpio_direction_output(rx->cfg.gpio_ready, !rx->cfg.polar);
+}
+
+static int asc_rx_event_send(struct asc_rx_handle *rx, int id)
+{
+ unsigned long flags = 0;
+ struct asc_event *event = NULL;
+ int ret = -ENODEV;
+
+ if(rx->thread == NULL){
+ ASCPRT("%s:no thread for event\n", __FUNCTION__);
+ return ret;
+ }
+
+	/* queue the event for the rx state-machine thread */
+ if(id >= 0){
+ event = asc_event_malloc();
+ if(!event){
+ ASCPRT("No memory to create new event.\n");
+ ret = -ENOMEM;
+ goto send_event_error;
+ }
+ /*insert a new event to the list tail and wakeup the process thread*/
+ //ASCDPRT("Rx(%s):send event(%d) to state(%s).\n", rx->name, id, rx->table[atomic_read(&rx->state)].name);
+ event->id = id;
+ spin_lock_irqsave(&rx->slock, flags);
+ if(AP_RX_EVENT_RESET == id){
+ list_add(&event->list, &rx->event_q);
+ }else{
+ list_add_tail(&event->list, &rx->event_q);
+ }
+ spin_unlock_irqrestore(&rx->slock, flags);
+ wake_up(&rx->wait);
+ }
+ ret = 0;
+send_event_error:
+ return ret;
+}
+
+static int asc_rx_event_recv(struct asc_rx_handle *rx)
+{
+ unsigned long flags = 0;
+ struct asc_event *event = NULL;
+ int ret = -ENODEV;
+
+ if(rx->thread == NULL){
+ ASCPRT("%s:no thread for event\n", __FUNCTION__);
+ return ret;
+ }
+
+ spin_lock_irqsave(&rx->slock, flags);
+ if(!list_empty(&rx->event_q)){
+ event = list_first_entry(&rx->event_q, struct asc_event, list);
+ list_del(&event->list);
+ }
+ spin_unlock_irqrestore(&rx->slock, flags);
+
+ if(event){
+ ret = event->id;
+ asc_event_free(event);
+ }
+ return ret;
+}
+
+static int asc_rx_event_thread(void *data)
+{
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+ int id = 0, index;
+ char name[ASC_NAME_LEN] = {0};
+ struct asc_state_dsp *dsp = NULL;
+
+ rx->thread = current;
+ snprintf(name, ASC_NAME_LEN, "asc_rx_%s", rx->cfg.name);
+ daemonize(name);
+ ASCDPRT("%s thread start now.\n", name);
+
+ while(1){
+		/* sleep until an event arrives or the thread is asked to exit */
+		wait_event(rx->wait, ((id=asc_rx_event_recv(rx)) >= 0) || (!rx->thread));
+		/* thread is exiting */
+ if(!rx->thread){
+ break;
+ }
+
+ mutex_lock(&rx->mlock);
+ if(AP_RX_EVENT_RESET == id){
+ asc_rx_handle_reset(rx);
+ }else{
+ index = atomic_read(&rx->state);
+ dsp = rx->table + index;
+ if(dsp->handle){
+ ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", rx->cfg.name, id, dsp->name);
+ dsp->handle(rx, id);
+ ASCDPRT("Rx(%s): go into state(%s).\n", rx->cfg.name, rx->table[atomic_read(&rx->state)].name);
+ }
+ }
+ mutex_unlock(&rx->mlock);
+ }
+
+ ASCDPRT("%s thread exit.\n", name);
+ kfree(rx);
+ return 0;
+}
+
+static void asc_rx_event_timer(unsigned long data)
+{
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+ //ASCDPRT("%s timer is time out.\n", rx->name);
+ asc_rx_event_send(rx, AP_RX_EVENT_IDLE_TIMEOUT);
+}
+
+static void asc_tx_notifier_work(struct work_struct *work)
+{
+ struct asc_infor *infor;
+ struct asc_user *user = NULL, *t = NULL;
+ struct asc_tx_handle *tx = container_of(work, struct asc_tx_handle,
+ ntf_work);
+
+ list_for_each_entry_safe(user, t, &tx->user_list, node){
+ infor = &user->infor;
+ if(infor->notifier){
+ infor->notifier(tx->ntf, infor->data);
+ }
+ }
+}
+
+static void asc_rx_notifier_work(struct work_struct *work)
+{
+ struct asc_infor *infor;
+ struct asc_user *user = NULL, *t = NULL;
+ struct asc_rx_handle *rx = container_of(work, struct asc_rx_handle,
+ ntf_work);
+
+ list_for_each_entry_safe(user, t, &rx->user_list, node){
+ infor = &user->infor;
+ if(infor->notifier){
+ infor->notifier(rx->ntf, infor->data);
+ }
+ }
+}
+
+static void asc_tx_notifier(struct asc_tx_handle *tx, int ntf)
+{
+ tx->ntf = ntf;
+ queue_work(asc_work_queue, &tx->ntf_work);
+}
+
+static void asc_rx_notifier(struct asc_rx_handle *rx, int ntf)
+{
+ rx->ntf = ntf;
+ queue_work(asc_work_queue, &rx->ntf_work);
+}
+
+static int asc_rx_handle_init(struct asc_rx_handle *rx)
+{
+ int ret = 0;
+ char *name = NULL;
+ struct asc_config *cfg = &rx->cfg;
+
+ if(cfg->gpio_ready >= 0){
+ ret = oem_gpio_request(cfg->gpio_ready, "ap_ready");
+ if(ret < 0){
+			ASCPRT("Fail to request ap_ready gpio %d for %s.\n", cfg->gpio_ready, cfg->name);
+ goto err_request_gpio_ap_ready;
+ }
+ if(ap_ready_always)
+ oem_gpio_output(cfg->gpio_ready, cfg->polar);
+ else
+ asc_rx_indicate_sleep(rx);
+ }
+
+ if(cfg->gpio_wake >= 0){
+ ret = oem_gpio_request(cfg->gpio_wake, "cp_wake_ap");
+ if(ret < 0){
+			ASCPRT("Fail to request cp_wake_ap gpio %d for %s.\n", cfg->gpio_wake, cfg->name);
+ goto err_request_gpio_cp_wake_ap;
+ }
+
+ oem_gpio_irq_mask(cfg->gpio_wake);
+ oem_gpio_direction_input_for_irq(cfg->gpio_wake);
+ oem_gpio_set_irq_type(cfg->gpio_wake, IRQF_TRIGGER_RISING);
+ ret = oem_gpio_request_irq(cfg->gpio_wake, asc_irq_cp_wake_ap, IRQF_SHARED | IRQF_NO_SUSPEND, "cp_wake_ap", rx);
+ printk("asc_rx_handle_init call oem_gpio_irq_unmask %d\n",cfg->gpio_wake);
+ oem_gpio_irq_unmask(cfg->gpio_wake);
+ if (ret < 0) {
+ ASCPRT("fail to request cp_wake_ap irq for %s\n", cfg->name);
+ goto err_req_irq_cp_wake_ap;
+ }
+ }
+
+ rx->table = asc_rx_table;
+ mutex_init(&rx->mlock);
+ INIT_LIST_HEAD(&rx->event_q);
+ INIT_LIST_HEAD(&rx->user_list);
+ spin_lock_init(&rx->slock);
+ setup_timer(&rx->timer, asc_rx_event_timer, (unsigned long)rx);
+ name = kzalloc(ASC_NAME_LEN, GFP_KERNEL);
+ if(!name){
+ ret = -ENOMEM;
+ ASCPRT("%s: no memory to malloc for wake lock name\n", __FUNCTION__);
+ goto err_malloc_name;
+ }
+ snprintf(name, ASC_NAME_LEN, "asc_rx_%s", rx->cfg.name);
+ wake_lock_init(&rx->wlock, WAKE_LOCK_SUSPEND, name);
+ init_waitqueue_head(&rx->wait);
+ INIT_WORK(&rx->ntf_work, asc_rx_notifier_work);
+ if(ap_ready_always)
+ atomic_set(&rx->state, AP_RX_ST_READY);
+ else
+ atomic_set(&rx->state, AP_RX_ST_SLEEP);
+ ret = kernel_thread(asc_rx_event_thread, rx, 0);
+ if(ret < 0){
+ ASCPRT("Fail to create %s rx thread.\n", rx->cfg.name);
+ goto err_create_rx_thread;
+ }
+
+	if(cfg->gpio_wake >= 0 && !!oem_gpio_get_value(cfg->gpio_wake) == cfg->polar){
+ asc_rx_event_send(rx, AP_RX_EVENT_REQUEST);
+ }
+
+ return 0;
+
+err_create_rx_thread:
+ if(name)
+ kfree(name);
+err_malloc_name:
+ if(cfg->gpio_wake >= 0)
+ free_irq(oem_gpio_to_irq(cfg->gpio_wake), rx);
+err_req_irq_cp_wake_ap:
+	if(cfg->gpio_wake >= 0)
+ oem_gpio_free(cfg->gpio_wake);
+err_request_gpio_cp_wake_ap:
+ if(cfg->gpio_ready >= 0)
+ oem_gpio_free(cfg->gpio_ready);
+err_request_gpio_ap_ready:
+ return ret;
+}
+
+static int asc_rx_handle_sleep(void *data, int event)
+{
+ int ret = 0;
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+
+ //ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", rx->name, event, rx->table[atomic_read(&rx->state)].name);
+
+ if(AP_RX_ST_SLEEP != atomic_read(&rx->state)){
+ return 0;
+ }
+
+ switch(event){
+ case AP_RX_EVENT_REQUEST:
+ wake_lock(&rx->wlock);
+ atomic_set(&rx->state, AP_RX_ST_WAIT_READY);
+ asc_rx_notifier(rx, ASC_NTF_RX_PREPARE);
+ break;
+ default:
+ ASCDPRT("ignore the rx event %d in state(%s)", event, rx->table[atomic_read(&rx->state)].name);
+ }
+
+ //ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name);
+ return ret;
+}
+
+static int asc_rx_handle_wait_ready(void *data, int event)
+{
+ int ret = 0;
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+
+ //ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", rx->name, event, rx->table[atomic_read(&rx->state)].name);
+
+ if(AP_RX_ST_WAIT_READY != atomic_read(&rx->state)){
+ return 0;
+ }
+
+ switch(event){
+ case AP_RX_EVENT_AP_READY:
+ /*need ack ready to cp, do nothing if no gpio for ap_ready*/
+ asc_rx_indicate_wake(rx);
+ atomic_set(&rx->state, AP_RX_ST_READY);
+ break;
+ case AP_RX_EVENT_AP_UNREADY:
+ case AP_RX_EVENT_STOP:
+ atomic_set(&rx->state, AP_RX_ST_SLEEP);
+ asc_rx_notifier(rx, ASC_NTF_RX_POST);
+ /*need ack ready to cp, do nothing if no gpio for ap_ready*/
+ asc_rx_indicate_sleep(rx);
+ wake_unlock(&rx->wlock);
+ break;
+ default:
+ ASCDPRT("ignore the rx event %d in state(%s)", event, rx->table[atomic_read(&rx->state)].name);
+ }
+
+ //ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name);
+ return ret;
+}
+
+static int asc_rx_handle_ready(void *data, int event)
+{
+ int ret = 0;
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+
+ //ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", rx->name, event, rx->table[atomic_read(&rx->state)].name);
+
+ if(AP_RX_ST_READY != atomic_read(&rx->state)){
+ return 0;
+ }
+
+ switch(event){
+ case AP_RX_EVENT_STOP:
+ atomic_set(&rx->state, AP_RX_ST_IDLE);
+ mod_timer(&rx->timer, jiffies + msecs_to_jiffies(ASC_RX_WAIT_IDLE_TIME));
+ break;
+ default:
+ ASCDPRT("ignore the rx event %d in state(%s)", event, rx->table[atomic_read(&rx->state)].name);
+ }
+
+ //ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name);
+ return ret;
+}
+
+static int asc_rx_handle_idle(void *data, int event)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+ struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
+
+ //ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", rx->name, event, rx->table[atomic_read(&rx->state)].name);
+
+ if(AP_RX_ST_IDLE != atomic_read(&rx->state)){
+ return 0;
+ }
+
+ /*FIXME: prevent from scheduled and interrupted to avoid error indication to CBP*/
+ spin_lock_irqsave(&rx->slock, flags);
+
+ switch(event){
+ case AP_RX_EVENT_REQUEST:
+ del_timer(&rx->timer);
+ atomic_set(&rx->state, AP_RX_ST_READY);
+ break;
+
+ case AP_RX_EVENT_IDLE_TIMEOUT:
+ asc_rx_notifier(rx, ASC_NTF_RX_POST);
+ atomic_set(&rx->state, AP_RX_ST_SLEEP);
+ /*need ack ready to cp, do nothing if no gpio for ap_ready*/
+ asc_rx_indicate_sleep(rx);
+ wake_unlock(&rx->wlock);
+ break;
+
+ default:
+ ASCDPRT("ignore the rx event %d in state(%s)", event, rx->table[atomic_read(&rx->state)].name);
+ }
+
+ spin_unlock_irqrestore(&rx->slock, flags);
+
+ //ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name);
+ return ret;
+}
+
+static void asc_tx_trig_busy(struct asc_tx_handle *tx)
+{
+ mod_timer(&tx->timer_wait_idle, jiffies + msecs_to_jiffies(tx->auto_delay));
+}
+
+static inline void asc_tx_wake_cp(struct asc_tx_handle *tx)
+{
+ if(tx->cfg.gpio_wake >= 0)
+ oem_gpio_direction_output(tx->cfg.gpio_wake, tx->cfg.polar);
+}
+
+static inline void asc_tx_sleep_cp(struct asc_tx_handle *tx)
+{
+ if(tx->cfg.gpio_wake >= 0)
+ oem_gpio_direction_output(tx->cfg.gpio_wake, !tx->cfg.polar);
+}
+
+static inline int asc_tx_cp_be_ready(struct asc_tx_handle *tx)
+{
+ int ret = 0;
+
+ if(tx->cfg.gpio_ready >= 0)
+ ret = ((!!oem_gpio_get_value(tx->cfg.gpio_ready)) == (tx->cfg.polar));
+
+ return ret;
+}
+
+
+static int asc_tx_event_send(struct asc_tx_handle *tx, int id)
+{
+ unsigned long flags = 0;
+ struct asc_event *event = NULL;
+ int ret = -ENODEV;
+
+ if(tx->thread == NULL){
+ ASCPRT("%s:no thread for event\n", __FUNCTION__);
+ return ret;
+ }
+
+	/* queue the event for the tx state-machine thread */
+ if(id >= 0){
+ event = asc_event_malloc();
+ if(!event){
+ ASCPRT("No memory to create new event.\n");
+ ret = -ENOMEM;
+ goto send_event_error;
+ }
+ /*insert a new event to the list tail and wakeup the process thread*/
+ //ASCDPRT("Send tx event(%d) to state(%s).\n", id, tx->table[atomic_read(&tx->state)].name);
+ event->id = id;
+ spin_lock_irqsave(&tx->slock, flags);
+ if(AP_TX_EVENT_RESET == id){
+ list_add(&event->list, &tx->event_q);
+ }else{
+ list_add_tail(&event->list, &tx->event_q);
+ }
+ spin_unlock_irqrestore(&tx->slock, flags);
+ wake_up(&tx->wait);
+	}
+	ret = 0;
+send_event_error:
+ return ret;
+}
+
+static int asc_tx_event_recv(struct asc_tx_handle *tx)
+{
+ unsigned long flags = 0;
+ struct asc_event *event = NULL;
+ int ret = -ENODEV;
+
+ if(tx->thread == NULL){
+ ASCPRT("%s:no thread for event\n", __FUNCTION__);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tx->slock, flags);
+ if(!list_empty(&tx->event_q)){
+ event = list_first_entry(&tx->event_q, struct asc_event, list);
+ list_del(&event->list);
+ }
+ spin_unlock_irqrestore(&tx->slock, flags);
+
+ if(event){
+ ret = event->id;
+ asc_event_free(event);
+ }
+ return ret;
+}
+
+static int asc_tx_get_user(struct asc_tx_handle *tx, const char *name)
+{
+ int ret = 0;
+ struct asc_user *user = NULL;
+
+ user = asc_tx_user_lookup(tx, name);
+ if(user){
+ atomic_inc(&user->count);
+ }else{
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+static int asc_tx_put_user(struct asc_tx_handle *tx, const char *name)
+{
+ struct asc_user *user = NULL;
+ int ret = 0;
+
+ user = asc_tx_user_lookup(tx, name);
+
+ if(user){
+ if(atomic_read(&user->count) >= 1){
+ atomic_dec(&user->count);
+ }
+ }else{
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int asc_tx_refer(struct asc_tx_handle *tx, const char *name)
+{
+ unsigned long flags = 0;
+ struct asc_user *user = NULL, *t = NULL;
+ int count = 0;
+
+ if(name){
+ /*get the reference count of the user*/
+ user = asc_tx_user_lookup(tx, name);
+ if(user){
+ count = atomic_read(&user->count);
+ }
+ }else{
+ spin_lock_irqsave(&tx->slock, flags);
+ list_for_each_entry_safe(user, t, &tx->user_list, node) {
+ count += atomic_read(&user->count);
+ }
+ spin_unlock_irqrestore(&tx->slock, flags);
+ }
+
+ return count;
+}
+
+static int asc_rx_refer(struct asc_rx_handle *rx, const char *name)
+{
+ unsigned long flags = 0;
+ struct asc_user *user = NULL, *t = NULL;
+ int count = 0;
+
+ if(name){
+ /*get the reference count of the user*/
+ user = asc_rx_user_lookup(rx, name);
+ if(user){
+ count = atomic_read(&user->count);
+ }
+ }else{
+ spin_lock_irqsave(&rx->slock, flags);
+ list_for_each_entry_safe(user, t, &rx->user_list, node) {
+ count += atomic_read(&user->count);
+ }
+ spin_unlock_irqrestore(&rx->slock, flags);
+ }
+
+ return count;
+}
+
+static void asc_tx_refer_clear(struct asc_tx_handle *tx)
+{
+ unsigned long flags = 0;
+ struct asc_user *user = NULL, *t = NULL;
+
+ spin_lock_irqsave(&tx->slock, flags);
+ list_for_each_entry_safe(user, t, &tx->user_list, node) {
+ atomic_set(&user->count, 0);
+ }
+ spin_unlock_irqrestore(&tx->slock, flags);
+}
+
+static int asc_tx_event_thread(void *data)
+{
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+ int id = 0, index;
+ char name[ASC_NAME_LEN] = {0};
+ struct asc_state_dsp *dsp = NULL;
+
+ snprintf(name, ASC_NAME_LEN, "asc_tx_%s", tx->cfg.name);
+ tx->thread = current;
+ daemonize(name);
+ ASCDPRT("%s thread start now.\n", name);
+
+ while(1){
+		/* sleep until an event arrives or the thread is asked to exit */
+		wait_event(tx->wait, ((id=asc_tx_event_recv(tx)) >= 0) || (!tx->thread));
+		/* thread is exiting */
+ if(!tx->thread){
+ break;
+ }
+
+ mutex_lock(&tx->mlock);
+ if(AP_TX_EVENT_RESET == id){
+ asc_tx_handle_reset(tx);
+ }else{
+ index = atomic_read(&tx->state);
+ dsp = tx->table + index;
+ if(dsp->handle){
+ ASCDPRT("Tx(%s): process event(%d) in state(%s).\n", tx->cfg.name, id, dsp->name);
+ dsp->handle(tx, id);
+ ASCDPRT("Tx(%s): go into state(%s) .\n", tx->cfg.name, tx->table[atomic_read(&tx->state)].name);
+ }
+ }
+ mutex_unlock(&tx->mlock);
+ }
+
+ ASCDPRT("%s thread exit.\n", name);
+ kfree(tx);
+ return 0;
+}
+
+static void asc_tx_wait_ready_timer(unsigned long data)
+{
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+ //ASCDPRT("%s tx wait ready timer is timeout.\n", tx->name);
+ asc_tx_event_send(tx, AP_TX_EVENT_WAIT_TIMEOUT);
+}
+
+static void asc_tx_wait_idle_timer(unsigned long data)
+{
+ char path[ASC_NAME_LEN] = {0};
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+ //ASCDPRT("%s tx wait idle timer is timeout.\n", tx->name);
+ snprintf(path, ASC_NAME_LEN, "%s.%s", tx->cfg.name, ASC_TX_AUTO_USER);
+ asc_tx_put_ready(path, 0);
+}
+
+static void asc_tx_wait_sleep_timer(unsigned long data)
+{
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+ //ASCDPRT("%s tx wait sleep timer is timeout.\n", tx->name);
+ asc_tx_event_send(tx, AP_TX_EVENT_IDLE_TIMEOUT);
+}
+
+static int asc_tx_handle_init(struct asc_tx_handle *tx)
+{
+ int ret = 0;
+ char *name = NULL;
+ struct asc_config *cfg = &tx->cfg;
+
+ ret = oem_gpio_request(cfg->gpio_wake, "ap_wake_cp");
+ if(ret < 0){
+		ASCPRT("Fail to request ap_wake_cp gpio %d for %s.\n", cfg->gpio_wake, cfg->name);
+ goto err_request_gpio_ap_wake_cp;
+ }
+
+ if(cfg->gpio_ready >= 0){
+ ret = oem_gpio_request(cfg->gpio_ready, "cp_ready");
+ if(ret < 0){
+			ASCPRT("Fail to request cp_ready gpio %d for %s.\n", cfg->gpio_ready, cfg->name);
+ goto err_request_gpio_cp_ready;
+ }
+
+ oem_gpio_irq_mask(cfg->gpio_ready);
+ oem_gpio_direction_input_for_irq(cfg->gpio_ready);
+ oem_gpio_set_irq_type(cfg->gpio_ready, IRQF_TRIGGER_RISING);
+ ret = oem_gpio_request_irq(cfg->gpio_ready, asc_irq_cp_indicate_state,
+ IRQF_SHARED, "cp_indicate_state", tx);
+ oem_gpio_irq_unmask(cfg->gpio_ready);
+ if (ret < 0) {
+ ASCPRT("fail to request irq for %s:cp_ready\n", cfg->name);
+ goto err_req_irq_cp_indicate_state;
+ }
+ }
+ asc_tx_sleep_cp(tx);
+
+ tx->auto_delay = ASC_TX_AUTO_DELAY_TIME;
+ tx->table = asc_tx_table;
+ mutex_init(&tx->mlock);
+ INIT_LIST_HEAD(&tx->event_q);
+ INIT_LIST_HEAD(&tx->user_list);
+ spin_lock_init(&tx->slock);
+ name = kzalloc(ASC_NAME_LEN, GFP_KERNEL);
+ if(!name){
+ ret = -ENOMEM;
+ ASCPRT("%s: no memory to malloc for wake lock name\n", __FUNCTION__);
+ goto err_malloc_name;
+ }
+ snprintf(name, ASC_NAME_LEN, "asc_tx_%s", tx->cfg.name);
+ wake_lock_init(&tx->wlock, WAKE_LOCK_SUSPEND, name);
+ init_waitqueue_head(&tx->wait);
+ init_waitqueue_head(&tx->wait_tx_state);
+ setup_timer(&tx->timer_wait_ready, asc_tx_wait_ready_timer, (unsigned long)tx);
+ setup_timer(&tx->timer_wait_idle, asc_tx_wait_idle_timer, (unsigned long)tx);
+ setup_timer(&tx->timer_wait_sleep, asc_tx_wait_sleep_timer, (unsigned long)tx);
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ atomic_set(&tx->count, 0);
+ INIT_WORK(&tx->ntf_work, asc_tx_notifier_work);
+ ret = kernel_thread(asc_tx_event_thread, tx, 0);
+ if(ret < 0){
+ ASCPRT("Fail to create %s tx thread.\n", tx->cfg.name);
+ goto err_create_tx_event_thread;
+ }
+
+ return 0;
+err_create_tx_event_thread:
+	if(name)
+		kfree(name);
+err_malloc_name:
+	if(cfg->gpio_ready >= 0)
+		free_irq(oem_gpio_to_irq(cfg->gpio_ready), tx);
+err_req_irq_cp_indicate_state:
+ if(cfg->gpio_ready >= 0)
+ oem_gpio_free(cfg->gpio_ready);
+err_request_gpio_cp_ready:
+ if(cfg->gpio_wake >= 0)
+ oem_gpio_free(cfg->gpio_wake);
+err_request_gpio_ap_wake_cp:
+ return ret;
+}
+
+static int asc_tx_handle_sleep(void *data, int event)
+{
+ int ret = 0;
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+
+ //ASCDPRT("Tx(%s): process event(%d) in state(%s).\n", tx->name, event, tx->table[atomic_read(&tx->state)].name);
+
+ if(AP_TX_ST_SLEEP != atomic_read(&tx->state)){
+ return 0;
+ }
+
+ switch(event){
+ case AP_TX_EVENT_REQUEST:
+ wake_lock(&tx->wlock);
+ asc_tx_wake_cp(tx);
+ if(tx->cfg.gpio_ready >= 0){
+ mod_timer(&tx->timer_wait_ready, jiffies + msecs_to_jiffies(ASC_TX_WAIT_READY_TIME));
+ atomic_set(&tx->state, AP_TX_ST_WAIT_READY);
+ if(asc_tx_cp_be_ready(tx)){
+				mdelay(ASC_TX_DEBOUNCE_TIME); /* debounce wait; make sure the CBP really is ready */
+ if(asc_tx_cp_be_ready(tx)){
+ ASCDPRT("Tx:cp %s was ready now.\n", tx->cfg.name);
+ asc_tx_handle_wait_ready(tx, AP_TX_EVENT_CP_READY);
+ }
+ }
+ }else{
+ mdelay(ASC_TX_DEBOUNCE_TIME);
+ atomic_set(&tx->state, AP_TX_ST_WAIT_READY);
+ asc_tx_handle_wait_ready(tx, AP_TX_EVENT_CP_READY);
+ }
+ break;
+ default:
+ ASCDPRT("Tx: ignore event %d in state(%s)", event, tx->table[atomic_read(&tx->state)].name);
+ }
+
+ //ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name);
+ return ret;
+}
+
+static int asc_tx_handle_wait_ready(void *data, int event)
+{
+ int ret = 0;
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+
+ if(AP_TX_ST_WAIT_READY != atomic_read(&tx->state)){
+ return 0;
+ }
+ //ASCDPRT("Tx(%s): process event(%d) in state(%s).\n", tx->name, event, tx->table[atomic_read(&tx->state)].name);
+ switch(event){
+ case AP_TX_EVENT_CP_READY:
+ del_timer(&tx->timer_wait_ready);
+ tx->wait_try = 0;
+ atomic_set(&tx->state, AP_TX_ST_READY);
+ wake_up_interruptible_all(&tx->wait_tx_state);
+ if(asc_tx_refer(tx, ASC_TX_AUTO_USER) > 0){
+ asc_tx_trig_busy(tx);
+ }
+ asc_tx_notifier(tx, ASC_NTF_TX_READY);
+ break;
+ case AP_TX_EVENT_WAIT_TIMEOUT:
+ ASCPRT("Tx: %s wait cp ready timeout, try=%d.\n", tx->cfg.name, tx->wait_try);
+
+ {
+			/* re-enable the cp_ready irq: it is observed to be disabled after resume */
+ oem_gpio_direction_input_for_irq(GPIO_VIATEL_USB_MDM_RDY);
+ oem_gpio_set_irq_type(GPIO_VIATEL_USB_MDM_RDY,(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING));
+ oem_gpio_irq_unmask(GPIO_VIATEL_USB_MDM_RDY);
+
+ }
+
+ asc_tx_sleep_cp(tx);
+		mdelay(ASC_TX_DEBOUNCE_TIME); /* delay to create an impulse */
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ if(tx->wait_try++ <= ASC_TX_TRY_TIMES){
+ asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
+ }else{
+ tx->wait_try = 0;
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ asc_tx_refer_clear(tx);
+ wake_up_interruptible_all(&tx->wait_tx_state);
+ wake_unlock(&tx->wlock);
+ asc_tx_notifier(tx, ASC_NTF_TX_UNREADY);
+			ASCPRT("ran out of tries to wake %s.\n", tx->cfg.name);
+ }
+ break;
+ case AP_TX_EVENT_STOP:
+ asc_tx_sleep_cp(tx);
+ del_timer(&tx->timer_wait_ready);
+ tx->wait_try = 0;
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ wake_unlock(&tx->wlock);
+ wake_up_interruptible_all(&tx->wait_tx_state);
+ break;
+ default:
+ ASCDPRT("Tx: ignore event %d in state(%s)", event, tx->table[atomic_read(&tx->state)].name);
+ }
+
+ //ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name);
+ return ret;
+}
+
+static int asc_tx_handle_ready(void *data, int event)
+{
+ int ret = 0;
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+
+ if(AP_TX_ST_READY != atomic_read(&tx->state)){
+ return 0;
+ }
+ //ASCDPRT("Tx(%s): process event(%d) in state(%s).\n", tx->name, event, tx->table[atomic_read(&tx->state)].name);
+ switch(event){
+ case AP_TX_EVENT_STOP:
+ del_timer(&tx->timer_wait_idle);
+ asc_tx_sleep_cp(tx);
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ wake_unlock(&tx->wlock);
+ //mod_timer(&tx->timer_wait_sleep, jiffies + msecs_to_jiffies(ASC_TX_WAIT_IDLE_TIME));
+ break;
+ default:
+ ASCDPRT("Tx: ignore event %d in state(%s)", event, tx->table[atomic_read(&tx->state)].name);
+ }
+
+ //ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name);
+ return ret;
+}
+
+/*Ignore the idle state, wait for a while to let CBP go to sleep*/
+static int asc_tx_handle_idle(void *data, int event)
+{
+ int ret = 0;
+ struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
+
+ if(AP_TX_ST_IDLE != atomic_read(&tx->state)){
+ return 0;
+ }
+
+ //ASCDPRT("Tx(%s): process event(%d) in state(%s).\n", tx->name, event, tx->table[atomic_read(&tx->state)].name);
+ switch(event){
+ case AP_TX_EVENT_IDLE_TIMEOUT:
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ wake_unlock(&tx->wlock);
+ break;
+ case AP_TX_EVENT_REQUEST:
+ del_timer(&tx->timer_wait_sleep);
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ /*loop back to SLEEP handle*/
+ asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
+ break;
+ default:
+ ASCDPRT("Tx: ignore event %d in state(%s)", event, tx->table[atomic_read(&tx->state)].name);
+ }
+
+ //ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name);
+ return ret;
+}
+
+static void asc_tx_handle_reset(struct asc_tx_handle *tx)
+{
+ unsigned long flags;
+
+ ASCDPRT("%s %s\n", __FUNCTION__, tx->cfg.name);
+ del_timer(&tx->timer_wait_ready);
+ del_timer(&tx->timer_wait_idle);
+ del_timer(&tx->timer_wait_sleep);
+ spin_lock_irqsave(&tx->slock, flags);
+ INIT_LIST_HEAD(&tx->event_q);
+ spin_unlock_irqrestore(&tx->slock, flags);
+ asc_tx_sleep_cp(tx);
+ atomic_set(&tx->state, AP_TX_ST_SLEEP);
+ wake_unlock(&tx->wlock);
+}
+
+/**
+ * asc_tx_reset - reset the tx handle
+ * @name: the config name for the handle
+ *
+ * no return value; the reset event is processed asynchronously
+ */
+void asc_tx_reset(const char *name)
+{
+ struct asc_tx_handle *tx = NULL;
+
+ tx = asc_tx_handle_lookup(name);
+ if(tx){
+ asc_tx_event_send(tx, AP_TX_EVENT_RESET);
+ }
+}
+EXPORT_SYMBOL(asc_tx_reset);
+
+/**
+ * asc_tx_set_auto_delay - change the delay time for auto ready
+ * @name: the config name for the handle
+ * @delay: the auto-ready delay in ms; applied only if greater than zero
+ * return 0 ok, others be error
+ */
+int asc_tx_set_auto_delay(const char *name, int delay)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct asc_tx_handle *tx;
+
+ tx = asc_tx_handle_lookup(name);
+ if(!tx){
+ ret = -ENODEV;
+ goto end;
+ }
+ if(delay > 0){
+ spin_lock_irqsave(&tx->slock, flags);
+ tx->auto_delay = delay;
+ spin_unlock_irqrestore(&tx->slock, flags);
+ }
+
+end:
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_set_auto_delay);
+
+/**
+ * asc_tx_check_ready - check whether the tx side is already ready
+ * @name: the config name for the handle
+ *
+ * return 1 if ready, 0 if not, others be error
+ */
+int asc_tx_check_ready(const char *name)
+{
+ int ret = 0;
+ struct asc_tx_handle *tx;
+
+ tx = asc_tx_handle_lookup(name);
+ if(NULL == tx)
+ return -ENODEV;
+
+ ret = atomic_read(&tx->state);
+
+ if(ret == AP_TX_ST_READY){
+ ret = 1;
+ }else{
+ ret = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_check_ready);
+
+/**
+ * asc_tx_user_count - get the reference count of the user or the handle
+ * @path: (handle name).[user name]
+ * If user name is NULL, return the count of tx handle.
+ * others return the count of the tx user
+ */
+int asc_tx_user_count(const char *path)
+{
+ const char *name;
+ char hname[ASC_NAME_LEN] = {0};
+ struct asc_tx_handle *tx = NULL;
+
+ name = strchr(path, '.');
+ if (name){
+ memcpy(hname, path, min(name - path, ASC_NAME_LEN - 1));
+ name++;
+ }else {
+ strncpy(hname, path, ASC_NAME_LEN - 1);
+ }
+
+ tx = asc_tx_handle_lookup(hname);
+
+ if(NULL == tx)
+ return -ENODEV;
+
+ return asc_tx_refer(tx, name);
+}
+EXPORT_SYMBOL(asc_tx_user_count);
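+/*
+ * Example (names illustrative): for a tx handle configured as "usb" with a
+ * registered user "AscApp", asc_tx_user_count("usb.AscApp") returns that
+ * user's count, while asc_tx_user_count("usb") returns the sum over all
+ * users of the handle.
+ */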
+
+/**
+ * asc_tx_add_user - add a user for tx handle
+ * @name: the config name for the handle
+ * @infor: the user information
+ *
+ * return 0, others be error
+ */
+int asc_tx_add_user(const char *name, struct asc_infor *infor)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+ struct asc_tx_handle *tx;
+ struct asc_user *user;
+
+ tx = asc_tx_handle_lookup(name);
+ if(NULL == tx)
+ return -ENODEV;
+
+ user = asc_tx_user_lookup(tx, infor->name);
+ if(NULL == user){
+ user = kzalloc(sizeof(*user), GFP_KERNEL);
+ if(!user){
+ ASCPRT("No memory to create new user reference.\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ user->infor.data = infor->data;
+ user->infor.notifier = infor->notifier;
+ strncpy(user->infor.name, infor->name, ASC_NAME_LEN - 1);
+ atomic_set(&user->count, 0);
+ spin_lock_irqsave(&tx->slock, flags);
+ list_add_tail(&user->node, &tx->user_list);
+ spin_unlock_irqrestore(&tx->slock, flags);
+ }else{
+ ASCPRT("%s error: user %s already exist!!\n", __FUNCTION__, infor->name);
+ ret = -EINVAL;
+ }
+error:
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_add_user);
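+/*
+ * A minimal caller-side registration sketch (handle/user names illustrative,
+ * notifier signature inferred from how it is invoked in the notifier work
+ * functions above):
+ *
+ *	static void demo_ntf(int event, void *data)
+ *	{
+ *		// react to ASC_NTF_TX_READY / ASC_NTF_TX_UNREADY
+ *	}
+ *
+ *	struct asc_infor infor = {
+ *		.name     = "AscDemo",
+ *		.data     = priv,
+ *		.notifier = demo_ntf,
+ *	};
+ *	asc_tx_add_user("usb", &infor);
+ */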
+
+/**
+ * asc_tx_del_user - delete a user for tx handle
+ * @path: (handle name).(user name)
+ *
+ * no return
+ */
+void asc_tx_del_user(const char *path)
+{
+ unsigned long flags = 0;
+ char hname[ASC_NAME_LEN] = {0};
+ const char *name;
+ struct asc_user *user = NULL;
+ struct asc_tx_handle *tx = NULL;
+
+ name = strchr(path, '.');
+ if (name) {
+ memcpy(hname, path, min(name - path, ASC_NAME_LEN - 1));
+ name++;
+ }else{
+ ASCPRT("%s: invalid path %s\n", __FUNCTION__, path);
+ return ;
+ }
+
+ /*if reserve user, do nothing*/
+ if (!strncmp(name, ASC_TX_SYSFS_USER, ASC_NAME_LEN - 1) || \
+ !strncmp(name, ASC_TX_AUTO_USER, ASC_NAME_LEN - 1) ) {
+ ASCPRT("Can not delete reserve user %s\n", path);
+ return ;
+ }
+
+ tx = asc_tx_handle_lookup(hname);
+
+ if(NULL == tx)
+ return ;
+
+ user = asc_tx_user_lookup(tx, name);
+ if(user){
+ /*put ready if the user had operated Tx handle*/
+ if(atomic_read(&user->count) > 0){
+ atomic_set(&user->count, 1);
+ asc_tx_put_ready(path, 0);
+ }
+ spin_lock_irqsave(&tx->slock, flags);
+ list_del(&user->node);
+ spin_unlock_irqrestore(&tx->slock, flags);
+ kfree(user);
+ }
+
+ return ;
+}
+EXPORT_SYMBOL(asc_tx_del_user);
+
+/**
+ * asc_tx_get_ready - lock the CBP awake for tx
+ * @path: (handle name).(user name)
+ * @sync: whether to block until the CBP is awake
+ *
+ * This function tries to wake the CBP and increments the reference
+ * count. If @sync is set it blocks until the CBP is awake; otherwise
+ * it only triggers the wakeup and cannot guarantee that the CBP is
+ * awake on return.
+ * Return 0 on success, negative value on error
+ */
+int asc_tx_get_ready(const char *path, int sync)
+{
+ int ret = 0;
+ char hname[ASC_NAME_LEN] = {0};
+ const char *name;
+ struct asc_tx_handle *tx = NULL;
+
+ name = strchr(path, '.');
+ if (name) {
+ memcpy(hname, path, min_t(size_t, name - path, ASC_NAME_LEN - 1));
+ name++;
+ } else {
+ ASCPRT("Invalid path %s\n", path);
+ return -EINVAL;
+ }
+ tx = asc_tx_handle_lookup(hname);
+ if(NULL == tx)
+ return -ENODEV;
+
+ if(asc_tx_get_user(tx, name) < 0){
+ ASCPRT("%s:tx user name %s is unknow\n", __FUNCTION__, name);
+ return -ENODEV;
+ }
+ ASCDPRT("%s: %s=%d, %s=%d\n", __FUNCTION__,\
+ tx->cfg.name, asc_tx_refer(tx, NULL), path, asc_tx_refer(tx, name));
+ switch(atomic_read(&tx->state)){
+ case AP_TX_ST_SLEEP:
+ /* to wake the CP ASAP, call the function directly */
+ if(!list_empty(&tx->event_q)){
+ asc_tx_handle_sleep(tx, AP_TX_EVENT_REQUEST);
+ }else{
+ asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
+ }
+ break;
+ case AP_TX_ST_IDLE:
+ asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
+ break;
+ case AP_TX_ST_WAIT_READY:
+ case AP_TX_ST_READY:
+ if(!strncmp(name, ASC_TX_AUTO_USER, strlen(ASC_TX_AUTO_USER))){
+ asc_tx_trig_busy(tx);
+ }
+ break;
+ default:
+ ASCPRT("Unknow tx state %d\n", atomic_read(&tx->state));
+ return -EINVAL;
+ }
+
+ if(sync){
+ if(AP_TX_ST_READY != atomic_read(&tx->state)){
+ interruptible_sleep_on(&tx->wait_tx_state);
+ if(AP_TX_ST_READY != atomic_read(&tx->state)){
+ ret = -EBUSY;
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_get_ready);
+
+/**
+ * asc_tx_put_ready - release the CBP so it may sleep
+ * @path: (config name).[user name]
+ * @sync: whether to block until the CBP is asleep
+ *
+ * This function drops a reference and, once the count reaches zero,
+ * lets the CBP go to sleep. If @sync is set it blocks until the CBP
+ * is asleep; otherwise it only triggers the action and cannot
+ * guarantee that the CBP is asleep on return. If the reference count
+ * is still non-zero after the decrement, nothing else is done.
+ * Return 0 on success, negative value on error
+ */
+int asc_tx_put_ready(const char *path, int sync)
+{
+ int ret = 0;
+ char hname[ASC_NAME_LEN] = {0};
+ const char *name;
+ struct asc_tx_handle *tx = NULL;
+
+ name = strchr(path, '.');
+ if (name) {
+ memcpy(hname, path, min_t(size_t, name - path, ASC_NAME_LEN - 1));
+ name++;
+ } else {
+ ASCPRT("Invalid path %s\n", path);
+ return -EINVAL;
+ }
+
+ tx = asc_tx_handle_lookup(hname);
+ if(NULL == tx)
+ return -ENODEV;
+
+ if(asc_tx_put_user(tx, name) < 0){
+ ASCPRT("%s:tx user name %s is unknow\n", __FUNCTION__, name);
+ return -ENODEV;
+ }
+ ASCDPRT("%s: %s=%d, %s=%d\n", __FUNCTION__,\
+ tx->cfg.name, asc_tx_refer(tx, NULL), path, asc_tx_refer(tx, name));
+ /*count is not 0, so do nothing*/
+ if(asc_tx_refer(tx, NULL) != 0){
+ return 0;
+ }
+
+ switch(atomic_read(&tx->state)){
+ case AP_TX_ST_SLEEP:
+ break;
+ case AP_TX_ST_WAIT_READY:
+ case AP_TX_ST_READY:
+ asc_tx_event_send(tx, AP_TX_EVENT_STOP);
+ break;
+ case AP_TX_ST_IDLE:
+ asc_tx_event_send(tx, AP_TX_EVENT_IDLE_TIMEOUT);
+ break;
+ default:
+ ASCPRT("Unknow tx state %d\n", atomic_read(&tx->state));
+ return -EINVAL;
+ }
+
+ if(sync){
+ if(AP_TX_ST_SLEEP != atomic_read(&tx->state)){
+ interruptible_sleep_on(&tx->wait_tx_state);
+ if(AP_TX_ST_SLEEP != atomic_read(&tx->state)){
+ ret = -EBUSY;
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_put_ready);
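+
+/*
+ * Usage sketch only ("cbp.my_tty", dev and send_burst() are
+ * hypothetical): asc_tx_get_ready()/asc_tx_put_ready() are meant to
+ * bracket a transmit burst, much like a lock/unlock pair on the CBP
+ * wake state:
+ *
+ *	if (asc_tx_get_ready("cbp.my_tty", 1) == 0) {
+ *		send_burst(dev);
+ *		asc_tx_put_ready("cbp.my_tty", 0);
+ *	}
+ */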
+
+/**
+ * asc_tx_auto_ready - call before each CBP operation when auto sleep is set
+ * @name: the config name for the handle
+ * @sync: whether to block until the CBP is awake
+ *
+ * This function tries to wake the CBP and kicks the tx state machine.
+ * If @sync is set it blocks until the CBP is awake; otherwise it only
+ * triggers the wakeup and cannot guarantee that the CBP is awake on
+ * return.
+ * Return 0 on success, negative value on error
+ */
+int asc_tx_auto_ready(const char *name, int sync)
+{
+ int ret = 0;
+ struct asc_user *user;
+ struct asc_tx_handle *tx;
+
+ if (!name) {
+ ASCPRT("%s:Invalid name\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ tx = asc_tx_handle_lookup(name);
+ if(NULL == tx)
+ return -ENODEV;
+
+ user = asc_tx_user_lookup(tx, ASC_TX_AUTO_USER);
+ if(!user){
+ return -ENODEV;
+ }
+
+ if(atomic_read(&user->count) == 0){
+ ASCDPRT("%s: %s=%d, %s=%d\n", __FUNCTION__,\
+ tx->cfg.name, asc_tx_refer(tx, NULL), ASC_TX_AUTO_USER, asc_tx_refer(tx, ASC_TX_AUTO_USER));
+ atomic_inc(&user->count);
+ }
+
+ switch(atomic_read(&tx->state)){
+ case AP_TX_ST_SLEEP:
+ /* to wake the CP ASAP, call the function directly */
+ if(!list_empty(&tx->event_q)){
+ asc_tx_handle_sleep(tx, AP_TX_EVENT_REQUEST);
+ }else{
+ asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
+ }
+ break;
+ case AP_TX_ST_IDLE:
+ asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
+ break;
+ case AP_TX_ST_WAIT_READY:
+ case AP_TX_ST_READY:
+ asc_tx_trig_busy(tx);
+ break;
+ default:
+ ASCPRT("Unknow tx state %d\n", atomic_read(&tx->state));
+ return -EINVAL;
+ }
+
+ if(sync){
+ if(AP_TX_ST_READY != atomic_read(&tx->state)){
+ interruptible_sleep_on(&tx->wait_tx_state);
+ if(AP_TX_ST_READY != atomic_read(&tx->state)){
+ ret = -EBUSY;
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_auto_ready);
+
+static void asc_rx_handle_reset(struct asc_rx_handle *rx)
+{
+ unsigned long flags;
+
+ ASCDPRT("%s %s\n", __FUNCTION__, rx->cfg.name);
+ del_timer(&rx->timer);
+ wake_unlock(&rx->wlock);
+ asc_rx_indicate_sleep(rx);
+ atomic_set(&rx->state, AP_RX_ST_SLEEP);
+ spin_lock_irqsave(&rx->slock, flags);
+ INIT_LIST_HEAD(&rx->event_q);
+ spin_unlock_irqrestore(&rx->slock, flags);
+
+}
+
+/**
+ * asc_rx_reset - reset the rx handle
+ * @name: the config name for the handle
+ *
+ * No return value.
+ */
+void asc_rx_reset(const char *name)
+{
+ struct asc_rx_handle *rx = NULL;
+
+ rx = asc_rx_handle_lookup(name);
+ if(rx){
+ asc_rx_event_send(rx, AP_RX_EVENT_RESET);
+ }
+}
+EXPORT_SYMBOL(asc_rx_reset);
+
+/**
+ * asc_rx_add_user - add a user for rx handle
+ * @name: the config name for the handle
+ * @infor: the user information
+ *
+ * Return 0 on success, negative value on error
+ */
+int asc_rx_add_user(const char *name, struct asc_infor *infor)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+ struct asc_rx_handle *rx;
+ struct asc_user *user;
+
+ rx = asc_rx_handle_lookup(name);
+ if(NULL == rx)
+ return -ENODEV;
+
+ user = asc_rx_user_lookup(rx, infor->name);
+ if(NULL == user){
+ user = kzalloc(sizeof(*user), GFP_KERNEL);
+ if(!user){
+ ASCPRT("No memory to create new user reference.\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ user->infor.data = infor->data;
+ user->infor.notifier = infor->notifier;
+ strncpy(user->infor.name, infor->name, ASC_NAME_LEN - 1);
+ atomic_set(&user->count, 0);
+ spin_lock_irqsave(&rx->slock, flags);
+ list_add_tail(&user->node, &rx->user_list);
+ spin_unlock_irqrestore(&rx->slock, flags);
+ if(AP_RX_ST_WAIT_READY == atomic_read(&rx->state)){
+ if(infor->notifier){
+ infor->notifier(ASC_NTF_RX_PREPARE, infor->data);
+ }
+ }
+ }else{
+ ASCPRT("%s error: user %s already exist!!\n", __FUNCTION__, infor->name);
+ ret = -EINVAL;
+ }
+error:
+ return ret;
+}
+EXPORT_SYMBOL(asc_rx_add_user);
+
+/**
+ * asc_rx_del_user - delete a user from the rx handle
+ * @path: (config name).[user name]
+ *
+ * No return value.
+ */
+void asc_rx_del_user(const char *path)
+{
+ unsigned long flags = 0;
+ const char *name;
+ char hname[ASC_NAME_LEN] = {0};
+ struct asc_user *user;
+ struct asc_rx_handle *rx;
+
+ name = strchr(path, '.');
+ if (name) {
+ memcpy(hname, path, min_t(size_t, name - path, ASC_NAME_LEN - 1));
+ name++;
+ }else{
+ ASCPRT("%s: Invalid path %s\n",__FUNCTION__, path);
+ return ;
+ }
+
+ rx = asc_rx_handle_lookup(hname);
+ if(NULL == rx)
+ return ;
+
+ user = asc_rx_user_lookup(rx, name);
+ if(user){
+ atomic_set(&user->count, 0);
+ spin_lock_irqsave(&rx->slock, flags);
+ list_del(&user->node);
+ spin_unlock_irqrestore(&rx->slock, flags);
+ kfree(user);
+ if(list_empty(&rx->user_list)){
+ asc_rx_handle_reset(rx);
+ }
+ }
+ return ;
+}
+EXPORT_SYMBOL(asc_rx_del_user);
+
+/**
+ * asc_rx_confirm_ready - report the AP rx state back to the CBP
+ * @name: the config name of the rx handle
+ * @ready: whether the AP is ready to receive data
+ *
+ * After the CBP requests the AP to receive data, this function tells
+ * the CBP whether the AP is ready to receive.
+ * Return 0 on success, negative value on error
+ */
+int asc_rx_confirm_ready(const char *name, int ready)
+{
+ struct asc_rx_handle *rx = NULL;
+
+ rx = asc_rx_handle_lookup(name);
+ if(!rx){
+ ASCDPRT("%s: name %s is unknow\n", __FUNCTION__, name);
+ return -ENODEV;
+ }
+
+ ASCDPRT("Rx(%s) cnofirm ready=%d\n", rx->cfg.name, ready);
+ return asc_rx_event_send(rx, ready ? AP_RX_EVENT_AP_READY : AP_RX_EVENT_AP_UNREADY);
+}
+EXPORT_SYMBOL(asc_rx_confirm_ready);
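+
+/*
+ * Usage sketch only: a hypothetical rx user's notifier (signature
+ * assumed from the calls in this file; "cbp" and prepare_rx_path()
+ * are placeholders) would typically answer ASC_NTF_RX_PREPARE by
+ * readying its receive path and then acking with
+ * asc_rx_confirm_ready():
+ *
+ *	static void my_rx_notifier(int event, void *data)
+ *	{
+ *		if (event == ASC_NTF_RX_PREPARE) {
+ *			prepare_rx_path(data);
+ *			asc_rx_confirm_ready("cbp", 1);
+ *		}
+ *	}
+ */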
+
+/**
+ * asc_rx_check_on_start - avoid missing a CP wakeup
+ * @name: the name of the rx handle
+ *
+ * Before any rx user is registered, the CP may try to wake the AP.
+ * The AP would normally miss that wakeup because the interrupt handler
+ * is not registered yet. So, when the tty device is opened, check
+ * whether the CP has already woken the AP; if it has, send the
+ * "AP READY" signal back to the CP.
+ */
+
+int asc_rx_check_on_start(const char *name)
+{
+ int level;
+ struct asc_config *cfg = NULL;
+ struct asc_rx_handle *rx = NULL;
+ int ret = 1;
+
+ rx = asc_rx_handle_lookup(name);
+ if(!rx){
+ ASCPRT("config %s has not already exist.\n", name);
+ return -EINVAL;
+ }
+
+ cfg = &(rx->cfg);
+ level = !!oem_gpio_get_value(cfg->gpio_wake);
+ if(level == cfg->polar){
+ /*Cp has requested Ap wake*/
+ if(AP_RX_ST_SLEEP == atomic_read(&rx->state)){
+ ASCDPRT("Rx(%s):check_on_start--send event AP_RX_EVENT_REQUEST.\n", cfg->name);
+ ret = asc_rx_event_send(rx, AP_RX_EVENT_REQUEST);
+ }else{
+ ASCDPRT("Rx(%s): check_on_start--send event AP_RX_EVENT_AP_READY.\n", cfg->name);
+ ret = asc_rx_event_send(rx, AP_RX_EVENT_AP_READY);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(asc_rx_check_on_start);
+
+static ssize_t asc_debug_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ s += sprintf(s, "%d\n", asc_debug);
+
+ return (s - buf);
+}
+
+static ssize_t asc_debug_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val < 0)
+ return -EINVAL;
+
+ asc_debug = val;
+
+ return n;
+}
+
+
+
+static ssize_t asc_infor_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ int val1, val2;
+ struct asc_config *cfg;
+ struct asc_infor *infor;
+ struct asc_user *user = NULL, *tuser = NULL;
+ struct asc_tx_handle *tx = NULL, *ttmp = NULL;
+ struct asc_rx_handle *rx = NULL, *rtmp = NULL;
+
+ list_for_each_entry_safe(tx, ttmp, &asc_tx_handle_list, node){
+ cfg = &tx->cfg;
+ val1 = val2 = -1;
+ if(cfg->gpio_wake >= 0)
+ val1 = !!oem_gpio_get_value(cfg->gpio_wake);
+ if(cfg->gpio_ready >= 0)
+ val2 = !!oem_gpio_get_value(cfg->gpio_ready);
+
+ s += sprintf(s, "Tx %s: ref=%d, ap_wake_cp(%d)=%d, cp_ready(%d)=%d, polar=%d, auto_delay=%d mS\n",
+ cfg->name, asc_tx_refer(tx, NULL), cfg->gpio_wake, val1, cfg->gpio_ready, val2, cfg->polar, tx->auto_delay);
+
+ list_for_each_entry_safe(user, tuser, &tx->user_list, node) {
+ infor = &user->infor;
+ s += sprintf(s, " user %s: ref=%d\n", infor->name, atomic_read(&user->count));
+ }
+ }
+
+ s += sprintf(s, "\n");
+
+ list_for_each_entry_safe(rx, rtmp, &asc_rx_handle_list, node){
+ cfg = &rx->cfg;
+ val1 = val2 = -1;
+ if(cfg->gpio_wake >= 0)
+ val1 = !!oem_gpio_get_value(cfg->gpio_wake);
+ if(cfg->gpio_ready >= 0)
+ val2 = !!oem_gpio_get_value(cfg->gpio_ready);
+
+ s += sprintf(s, "Rx %s: ref=%d, cp_wake_ap(%d)=%d, ap_ready(%d)=%d, polar=%d\n", \
+ cfg->name, asc_rx_refer(rx, NULL), cfg->gpio_wake, val1, cfg->gpio_ready, val2, cfg->polar);
+ list_for_each_entry_safe(user, tuser, &rx->user_list, node) {
+ infor = &user->infor;
+ s += sprintf(s, " user %s: ref=%d\n", infor->name, atomic_read(&user->count));
+ }
+ }
+
+ return (s - buf);
+}
+static ssize_t asc_infor_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ return n;
+}
+
+static ssize_t asc_refer_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long flags;
+ char *s = buf;
+ struct asc_tx_handle *tx, *tmp, *t;
+
+ tx = tmp = NULL;
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node){
+ if(tmp->kobj == kobj){
+ tx = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+
+ if(tx){
+ s += sprintf(s, "%d\n", asc_tx_refer(tx, ASC_TX_SYSFS_USER));
+ return s - buf;
+ }else{
+ ASCPRT("%s read error\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+}
+
+
+static ssize_t asc_setcpmode_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ s += sprintf(s, "nothing\n");
+
+ return (s - buf);
+}
+
+static ssize_t asc_setcpmode_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val < 0)
+ return -EINVAL;
+
+ if(val>0)
+ oem_gpio_output(GPIO_VIATEL_USB_AP_WAKE_MDM,1);
+ else
+ oem_gpio_output(GPIO_VIATEL_USB_AP_WAKE_MDM,0);
+
+
+ //asc_debug = val;
+
+ return n;
+}
+
+
+static ssize_t asc_refer_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long flags;
+ char *p;
+ int error = 0, len;
+ char path[ASC_NAME_LEN] = {0};
+ struct asc_tx_handle *tx, *tmp, *t;
+
+ tx = tmp = NULL;
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node){
+ if(tmp->kobj == kobj){
+ tx = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+
+ if(tx){
+ p = memchr(buf, '\n', n);
+ len = p ? p - buf : n;
+ snprintf(path, ASC_NAME_LEN, "%s.%s", tx->cfg.name, ASC_TX_SYSFS_USER);
+
+ if (len == 3 && !strncmp(buf, "get", len)) {
+ error = asc_tx_get_ready(path, 1);
+ }else if (len == 3 && !strncmp(buf, "put", len)) {
+ error = asc_tx_put_ready(path, 1);
+ }
+ }
+
+ return error ? error : n;
+}
+
+static ssize_t asc_state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long flags;
+ char *s = buf;
+ struct asc_tx_handle *tx, *tmp, *t;
+
+ tx = tmp = NULL;
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node){
+ if(tmp->kobj == kobj){
+ tx = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+
+ if(tx){
+ s += sprintf(s, "%s\n", tx->table[atomic_read(&tx->state)].name);
+ return s - buf;
+ }else{
+ ASCPRT("%s read error\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+}
+
+static ssize_t asc_state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ return n;
+}
+
+static ssize_t asc_auto_ready_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long flags;
+ char *s = buf;
+ struct asc_tx_handle *tx, *tmp, *t;
+
+ tx = tmp = NULL;
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node){
+ if(tmp->kobj == kobj){
+ tx = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+
+ if(tx){
+ s += sprintf(s, "%d\n", tx->auto_delay);
+ return s - buf;
+ }else{
+ ASCPRT("%s read error\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+}
+
+static ssize_t asc_auto_ready_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int error = 0;
+ long val;
+ unsigned long flags;
+ struct asc_tx_handle *tx, *tmp, *t;
+
+ tx = tmp = NULL;
+ spin_lock_irqsave(&hdlock, flags);
+ list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node){
+ if(tmp->kobj == kobj){
+ tx = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hdlock, flags);
+
+ if(tx){
+ error = strict_strtol(buf, 10, &val);
+ if(error || (val < 0)){
+ error = -EINVAL;
+ goto end;
+ }
+
+ if(val > 0){
+ spin_lock_irqsave(&tx->slock, flags);
+ tx->auto_delay = val;
+ spin_unlock_irqrestore(&tx->slock, flags);
+ }
+ error = asc_tx_auto_ready(tx->cfg.name, 1);
+ }else{
+ ASCPRT("%s read error\n", __FUNCTION__);
+ error = -EINVAL;
+ }
+
+end:
+ return error ? error : n;
+}
+
+#define asc_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0644, \
+ }, \
+ .show = asc_##_name##_show, \
+ .store = asc_##_name##_store, \
+}
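+
+/*
+ * For reference, asc_attr(debug) below expands (roughly) to:
+ *
+ *	static struct kobj_attribute debug_attr = {
+ *		.attr  = { .name = "debug", .mode = 0644 },
+ *		.show  = asc_debug_show,
+ *		.store = asc_debug_store,
+ *	};
+ */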
+
+asc_attr(debug);
+asc_attr(infor);
+asc_attr(setcpmode);
+
+static struct attribute * g_attr[] = {
+ &debug_attr.attr,
+ &infor_attr.attr,
+ &setcpmode_attr.attr,
+ NULL,
+};
+
+static struct attribute_group g_attr_group = {
+ .attrs = g_attr,
+};
+
+asc_attr(refer);
+asc_attr(state);
+asc_attr(auto_ready);
+
+static struct attribute * tx_hd_attr[] = {
+ &refer_attr.attr,
+ &state_attr.attr,
+ &auto_ready_attr.attr,
+ NULL,
+};
+
+int asc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ oem_gpio_irq_mask(tx_cfg.gpio_ready);
+ oem_gpio_irq_mask(rx_cfg.gpio_wake);
+ if(ap_ready_always)
+ oem_gpio_output(rx_cfg.gpio_ready,!rx_cfg.polar);
+
+ return 0;
+}
+
+
+int asc_resume(struct platform_device *pdev)
+{
+
+ msleep(500);
+ printk("asc_resume\n");
+ oem_gpio_direction_output(tx_cfg.gpio_wake, tx_cfg.polar);
+ oem_gpio_direction_input_for_irq(tx_cfg.gpio_ready);
+ oem_gpio_set_irq_type(tx_cfg.gpio_ready, IRQF_TRIGGER_RISING);
+ oem_gpio_irq_unmask(tx_cfg.gpio_ready);
+
+ oem_gpio_output(rx_cfg.gpio_ready, rx_cfg.polar);
+ oem_gpio_direction_input_for_irq(rx_cfg.gpio_wake);
+ oem_gpio_set_irq_type(rx_cfg.gpio_wake, IRQF_TRIGGER_RISING);
+ oem_gpio_irq_unmask(rx_cfg.gpio_wake);
+
+ return 0;
+}
+
+static struct attribute_group tx_hd_attr_group = {
+ .attrs = tx_hd_attr,
+};
+
+
+static struct platform_driver asc_driver = {
+ .driver.name = "asc",
+ .suspend = asc_suspend,
+ .resume = asc_resume,
+};
+
+static struct platform_device asc_device = {
+ .name = "asc",
+};
+
+
+/**
+ * asc_rx_register_handle - register the rx handle
+ * @cfg: the config for the handle
+ *
+ * A device that receives data from the CBP can register a notifier to
+ * listen for events on CBP state changes.
+ * The ASC_PREPARE_RX_DATA event is sent when the CBP starts sending
+ * data to the device, which must then be ready to work;
+ * the ASC_POST_RX_DATA event is sent when the CBP stops sending data,
+ * so the device may go to sleep.
+ * The gpio for ap_ready can be -1, in which case it is ignored; use
+ * this when the device can correctly receive data from the CBP at any time.
+ * Return the index of the notifier in the handle, negative value on error
+ */
+int asc_rx_register_handle(struct asc_config *cfg)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct asc_rx_handle *rx = NULL;
+
+ if(NULL == asc_work_queue){
+ ASCPRT("%s: error Asc has not been init\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if(NULL == cfg){
+ return -EINVAL;
+ }
+
+ if(cfg->gpio_wake < 0){
+ ASCPRT("%s: config %s gpio is invalid.\n", __FUNCTION__, cfg->name);
+ return -EINVAL;
+ }
+
+ rx = asc_rx_handle_lookup(cfg->name);
+ if(rx){
+ ASCPRT("config %s has already exist.\n", cfg->name);
+ return -EINVAL;
+ }
+
+ rx = kzalloc(sizeof(struct asc_rx_handle), GFP_KERNEL);
+ if(NULL == rx){
+ ASCPRT("No memory to alloc rx handle.\n");
+ return -ENOMEM;
+ }
+
+ rx->cfg.gpio_ready = cfg->gpio_ready;
+ rx->cfg.gpio_wake = cfg->gpio_wake;
+ rx->cfg.polar = !!cfg->polar;
+ strncpy(rx->cfg.name, cfg->name, ASC_NAME_LEN - 1);
+ memcpy(&rx_cfg, &rx->cfg, sizeof(struct asc_config));
+
+ ret = asc_rx_handle_init(rx);
+ if(ret < 0){
+ kfree(rx);
+ ASCPRT("fail to init rx handle %s\n", rx->cfg.name);
+ return -EINVAL;
+ }
+
+ /* Add the handle to the asc list */
+ spin_lock_irqsave(&hdlock, flags);
+ list_add(&rx->node, &asc_rx_handle_list);
+ spin_unlock_irqrestore(&hdlock, flags);
+ ASCDPRT("Register rx handle %s\n", rx->cfg.name);
+ return ret;
+}
+EXPORT_SYMBOL(asc_rx_register_handle);
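+
+/*
+ * Registration sketch only: the GPIO macros and the name "cbp" are
+ * placeholders. A board file would fill an asc_config and register it,
+ * passing gpio_ready as -1 if the AP can always receive:
+ *
+ *	struct asc_config cfg = {
+ *		.gpio_wake  = GPIO_CP_WAKE_AP,
+ *		.gpio_ready = GPIO_AP_READY,
+ *		.polar      = 1,
+ *	};
+ *	strncpy(cfg.name, "cbp", ASC_NAME_LEN - 1);
+ *	asc_rx_register_handle(&cfg);
+ */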
+
+/**
+ * asc_tx_register_handle - register the tx handle for state change
+ * @cfg: the config for the handle
+ *
+ * The chip that exchanges data with the CBP must create a handle.
+ * There is only one tx state handle between the AP and the CBP,
+ * because every device in the CBP is ready to receive data once the
+ * CBP has been woken. Several rx state handles can exist, however,
+ * because different devices in the AP may be woken individually.
+ * Each rx state handle must register a notifier to listen for events.
+ * Return 0 on success, negative value on error
+ */
+int asc_tx_register_handle(struct asc_config *cfg)
+{
+ int ret=0;
+ unsigned long flags;
+ struct asc_infor infor;
+ struct asc_tx_handle *tx = NULL;
+
+ if(NULL == asc_work_queue){
+ ASCPRT("%s: error Asc has not been init\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if(NULL == cfg){
+ return -EINVAL;
+ }
+
+ if(cfg->gpio_wake < 0){
+ ASCPRT("%s: config %s gpio is invalid.\n", __FUNCTION__, cfg->name);
+ return -EINVAL;
+ }
+
+ tx = asc_tx_handle_lookup(cfg->name);
+ if(tx){
+ ASCPRT("config %s has already exist.\n", cfg->name);
+ return -EINVAL;
+ }
+
+ tx = kzalloc(sizeof(struct asc_tx_handle), GFP_KERNEL);
+ if(NULL == tx){
+ ASCPRT("Fail to alloc memory for tx handle.\n");
+ return -ENOMEM;
+ }
+
+ tx->cfg.gpio_ready = cfg->gpio_ready;
+ tx->cfg.gpio_wake = cfg->gpio_wake;
+ tx->cfg.polar = !!cfg->polar;
+ strncpy(tx->cfg.name, cfg->name, ASC_NAME_LEN - 1);
+ memcpy(&tx_cfg, &tx->cfg, sizeof(struct asc_config));
+ ret = asc_tx_handle_init(tx);
+ if(ret < 0){
+ ASCPRT("Fail to init tx handle %s.\n", tx->cfg.name);
+ goto err_tx_handle_init;
+ }
+
+ /* Add the handle to the asc list */
+ spin_lock_irqsave(&hdlock, flags);
+ list_add(&tx->node, &asc_tx_handle_list);
+ spin_unlock_irqrestore(&hdlock, flags);
+ ASCDPRT("Register tx handle %s.\n", tx->cfg.name);
+
+ tx->kobj = kobject_create_and_add(cfg->name, asc_kobj);
+ if(!tx->kobj){
+ ret = -ENOMEM;
+ goto err_create_kobj;
+ }
+
+ /*add default user for application*/
+ memset(&infor, 0, sizeof(infor));
+ strncpy(infor.name, ASC_TX_SYSFS_USER, ASC_NAME_LEN - 1);
+ asc_tx_add_user(tx->cfg.name, &infor);
+ memset(&infor, 0, sizeof(infor));
+ strncpy(infor.name, ASC_TX_AUTO_USER, ASC_NAME_LEN - 1);
+ asc_tx_add_user(tx->cfg.name, &infor);
+ return sysfs_create_group(tx->kobj, &tx_hd_attr_group);
+
+err_create_kobj:
+ list_del(&tx->node);
+err_tx_handle_init:
+ if(tx){
+ kfree(tx);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(asc_tx_register_handle);
+
+static int __init asc_init(void)
+{
+ int ret;
+ ret = oem_gpio_convert_init();
+ if(ret){
+ printk("Warnning:viatel gpio not set.\n");
+ return -EIO;
+ }
+
+ ret = platform_device_register(&asc_device);
+ if (ret) {
+ ASCPRT("platform_device_register failed\n");
+ goto err_platform_device_register;
+ }
+ ret = platform_driver_register(&asc_driver);
+ if (ret) {
+ ASCPRT("platform_driver_register failed\n");
+ goto err_platform_driver_register;
+ }
+
+ asc_work_queue = create_singlethread_workqueue("asc_work");
+ if (asc_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_create_work_queue;
+ }
+
+ asc_kobj = viatel_kobject_add("asc");
+ if (!asc_kobj){
+ ret = -ENOMEM;
+ goto err_create_kobj;
+ }
+
+ return sysfs_create_group(asc_kobj, &g_attr_group);
+
+err_create_kobj:
+ destroy_workqueue(asc_work_queue);
+err_create_work_queue:
+ platform_driver_unregister(&asc_driver);
+err_platform_driver_register:
+ platform_device_unregister(&asc_device);
+err_platform_device_register:
+ return ret;
+}
+
+device_initcall(asc_init);
+
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
new file mode 100644
index 00000000..cb56e270
--- /dev/null
+++ b/drivers/misc/vmw_balloon.c
@@ -0,0 +1,838 @@
+/*
+ * VMware Balloon driver.
+ *
+ * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ */
+
+/*
+ * This is VMware physical memory management driver for Linux. The driver
+ * acts like a "balloon" that can be inflated to reclaim physical pages by
+ * reserving them in the guest and invalidating them in the monitor,
+ * freeing up the underlying machine pages so they can be allocated to
+ * other guests. The balloon can also be deflated to allow the guest to
+ * use more physical memory. Higher level policies can control the sizes
+ * of balloons in VMs in order to manage physical memory resources.
+ */
+
+//#define DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/hypervisor.h>
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
+MODULE_VERSION("1.2.1.3-k");
+MODULE_ALIAS("dmi:*:svnVMware*:*");
+MODULE_ALIAS("vmware_vmmemctl");
+MODULE_LICENSE("GPL");
+
+/*
+ * Various constants controlling the rate of inflating/deflating the balloon,
+ * measured in pages.
+ */
+
+/*
+ * Rate of allocating memory when there is no memory pressure
+ * (driver performs non-sleeping allocations).
+ */
+#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U
+
+/*
+ * Rates of memory allocation when guest experiences memory pressure
+ * (driver performs sleeping allocations).
+ */
+#define VMW_BALLOON_RATE_ALLOC_MIN 512U
+#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
+#define VMW_BALLOON_RATE_ALLOC_INC 16U
+
+/*
+ * Rates for releasing pages while deflating balloon.
+ */
+#define VMW_BALLOON_RATE_FREE_MIN 512U
+#define VMW_BALLOON_RATE_FREE_MAX 16384U
+#define VMW_BALLOON_RATE_FREE_INC 16U
+
+/*
+ * When guest is under memory pressure, use a reduced page allocation
+ * rate for next several cycles.
+ */
+#define VMW_BALLOON_SLOW_CYCLES 4
+
+/*
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
+ * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * __GFP_NOWARN, to suppress page allocation failure warnings.
+ */
+#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+
+/*
+ * Use GFP_HIGHUSER when executing in a separate kernel thread
+ * context and allocation can sleep. This is less stressful to
+ * the guest memory system, since it allows the thread to block
+ * while memory is reclaimed, and won't take pages from emergency
+ * low-memory pools.
+ */
+#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
+
+/* Maximum number of page allocations without yielding processor */
+#define VMW_BALLOON_YIELD_THRESHOLD 1024
+
+/* Maximum number of refused pages we accumulate during inflation cycle */
+#define VMW_BALLOON_MAX_REFUSED 16
+
+/*
+ * Hypervisor communication port definitions.
+ */
+#define VMW_BALLOON_HV_PORT 0x5670
+#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
+#define VMW_BALLOON_PROTOCOL_VERSION 2
+#define VMW_BALLOON_GUEST_ID 1 /* Linux */
+
+#define VMW_BALLOON_CMD_START 0
+#define VMW_BALLOON_CMD_GET_TARGET 1
+#define VMW_BALLOON_CMD_LOCK 2
+#define VMW_BALLOON_CMD_UNLOCK 3
+#define VMW_BALLOON_CMD_GUEST_ID 4
+
+/* error codes */
+#define VMW_BALLOON_SUCCESS 0
+#define VMW_BALLOON_FAILURE -1
+#define VMW_BALLOON_ERROR_CMD_INVALID 1
+#define VMW_BALLOON_ERROR_PPN_INVALID 2
+#define VMW_BALLOON_ERROR_PPN_LOCKED 3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
+#define VMW_BALLOON_ERROR_PPN_PINNED 5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
+#define VMW_BALLOON_ERROR_RESET 7
+#define VMW_BALLOON_ERROR_BUSY 8
+
+#define VMWARE_BALLOON_CMD(cmd, data, result) \
+({ \
+ unsigned long __stat, __dummy1, __dummy2; \
+ __asm__ __volatile__ ("inl (%%dx)" : \
+ "=a"(__stat), \
+ "=c"(__dummy1), \
+ "=d"(__dummy2), \
+ "=b"(result) : \
+ "0"(VMW_BALLOON_HV_MAGIC), \
+ "1"(VMW_BALLOON_CMD_##cmd), \
+ "2"(VMW_BALLOON_HV_PORT), \
+ "3"(data) : \
+ "memory"); \
+ result &= -1UL; \
+ __stat & -1UL; \
+})
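+
+/*
+ * For example, VMWARE_BALLOON_CMD(GET_TARGET, limit, target) performs a
+ * single "inl" on port VMW_BALLOON_HV_PORT with VMW_BALLOON_HV_MAGIC in
+ * %eax and VMW_BALLOON_CMD_GET_TARGET in %ecx; the argument goes in via
+ * %ebx, the hypervisor's status comes back in %eax (the macro's value)
+ * and the data reply in %ebx (stored through "result").
+ */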
+
+#ifdef CONFIG_DEBUG_FS
+struct vmballoon_stats {
+ unsigned int timer;
+
+ /* allocation statistics */
+ unsigned int alloc;
+ unsigned int alloc_fail;
+ unsigned int sleep_alloc;
+ unsigned int sleep_alloc_fail;
+ unsigned int refused_alloc;
+ unsigned int refused_free;
+ unsigned int free;
+
+ /* monitor operations */
+ unsigned int lock;
+ unsigned int lock_fail;
+ unsigned int unlock;
+ unsigned int unlock_fail;
+ unsigned int target;
+ unsigned int target_fail;
+ unsigned int start;
+ unsigned int start_fail;
+ unsigned int guest_type;
+ unsigned int guest_type_fail;
+};
+
+#define STATS_INC(stat) (stat)++
+#else
+#define STATS_INC(stat)
+#endif
+
+struct vmballoon {
+
+ /* list of reserved physical pages */
+ struct list_head pages;
+
+ /* transient list of non-balloonable pages */
+ struct list_head refused_pages;
+ unsigned int n_refused_pages;
+
+ /* balloon size in pages */
+ unsigned int size;
+ unsigned int target;
+
+ /* reset flag */
+ bool reset_required;
+
+ /* adjustment rates (pages per second) */
+ unsigned int rate_alloc;
+ unsigned int rate_free;
+
+ /* slowdown page allocations for next few cycles */
+ unsigned int slow_allocation_cycles;
+
+#ifdef CONFIG_DEBUG_FS
+ /* statistics */
+ struct vmballoon_stats stats;
+
+ /* debugfs file exporting statistics */
+ struct dentry *dbg_entry;
+#endif
+
+ struct sysinfo sysinfo;
+
+ struct delayed_work dwork;
+};
+
+static struct vmballoon balloon;
+
+/*
+ * Send "start" command to the host, communicating supported version
+ * of the protocol.
+ */
+static bool vmballoon_send_start(struct vmballoon *b)
+{
+ unsigned long status, dummy;
+
+ STATS_INC(b->stats.start);
+
+ status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+ if (status == VMW_BALLOON_SUCCESS)
+ return true;
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.start_fail);
+ return false;
+}
+
+static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
+{
+ switch (status) {
+ case VMW_BALLOON_SUCCESS:
+ return true;
+
+ case VMW_BALLOON_ERROR_RESET:
+ b->reset_required = true;
+ /* fall through */
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * Communicate guest type to the host so that it can adjust ballooning
+ * algorithm to the one most appropriate for the guest. This command
+ * is normally issued after sending "start" command and is part of
+ * standard reset sequence.
+ */
+static bool vmballoon_send_guest_id(struct vmballoon *b)
+{
+ unsigned long status, dummy;
+
+ status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
+
+ STATS_INC(b->stats.guest_type);
+
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.guest_type_fail);
+ return false;
+}
+
+/*
+ * Retrieve desired balloon size from the host.
+ */
+static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+{
+ unsigned long status;
+ unsigned long target;
+ unsigned long limit;
+ u32 limit32;
+
+ /*
+ * si_meminfo() is cheap. Moreover, we want to provide dynamic
+ * max balloon size later. So let us call si_meminfo() every
+ * iteration.
+ */
+ si_meminfo(&b->sysinfo);
+ limit = b->sysinfo.totalram;
+
+ /* Ensure limit fits in 32-bits */
+ limit32 = (u32)limit;
+ if (limit != limit32)
+ return false;
+
+ /* update stats */
+ STATS_INC(b->stats.target);
+
+ status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
+ if (vmballoon_check_status(b, status)) {
+ *new_target = target;
+ return true;
+ }
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.target_fail);
+ return false;
+}
+
+/*
+ * Notify the host about allocated page so that host can use it without
+ * fear that guest will need it. Host may reject some pages, we need to
+ * check the return value and maybe submit a different page.
+ */
+static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+ unsigned int *hv_status)
+{
+ unsigned long status, dummy;
+ u32 pfn32;
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+ return -1;
+
+ STATS_INC(b->stats.lock);
+
+ *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+ if (vmballoon_check_status(b, status))
+ return 0;
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.lock_fail);
+ return 1;
+}
+
+/*
+ * Notify the host that guest intends to release given page back into
+ * the pool of available (to the guest) pages.
+ */
+static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
+{
+ unsigned long status, dummy;
+ u32 pfn32;
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+ return false;
+
+ STATS_INC(b->stats.unlock);
+
+ status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.unlock_fail);
+ return false;
+}
+
+/*
+ * Quickly release all pages allocated for the balloon. This function is
+ * called when host decides to "reset" balloon for one reason or another.
+ * Unlike normal "deflate" we do not (shall not) notify host of the pages
+ * being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+ struct page *page, *next;
+ unsigned int count = 0;
+
+ list_for_each_entry_safe(page, next, &b->pages, lru) {
+ list_del(&page->lru);
+ __free_page(page);
+ STATS_INC(b->stats.free);
+ b->size--;
+
+ if (++count >= b->rate_free) {
+ count = 0;
+ cond_resched();
+ }
+ }
+}
+
+/*
+ * Perform standard reset sequence by popping the balloon (in case it
+ * is not empty) and then restarting protocol. This operation normally
+ * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
+ */
+static void vmballoon_reset(struct vmballoon *b)
+{
+ /* free all pages, skipping monitor unlock */
+ vmballoon_pop(b);
+
+ if (vmballoon_send_start(b)) {
+ b->reset_required = false;
+ if (!vmballoon_send_guest_id(b))
+ pr_err("failed to send guest ID to the host\n");
+ }
+}
+
+/*
+ * Allocate (or reserve) a page for the balloon and notify the host. If host
+ * refuses the page put it on "refuse" list and allocate another one until host
+ * is satisfied. "Refused" pages are released at the end of inflation cycle
+ * (when we allocate b->rate_alloc pages).
+ */
+static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+{
+ struct page *page;
+ gfp_t flags;
+ unsigned int hv_status;
+ int locked;
+ flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
+
+ do {
+ if (!can_sleep)
+ STATS_INC(b->stats.alloc);
+ else
+ STATS_INC(b->stats.sleep_alloc);
+
+ page = alloc_page(flags);
+ if (!page) {
+ if (!can_sleep)
+ STATS_INC(b->stats.alloc_fail);
+ else
+ STATS_INC(b->stats.sleep_alloc_fail);
+ return -ENOMEM;
+ }
+
+ /* inform monitor */
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
+ if (locked > 0) {
+ STATS_INC(b->stats.refused_alloc);
+
+ if (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ /*
+ * Place page on the list of non-balloonable pages
+ * and retry allocation, unless we already accumulated
+ * too many of them, in which case take a breather.
+ */
+ list_add(&page->lru, &b->refused_pages);
+ if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
+ return -EIO;
+ }
+ } while (locked != 0);
+
+ /* track allocated page */
+ list_add(&page->lru, &b->pages);
+
+ /* update balloon size */
+ b->size++;
+
+ return 0;
+}
+
+/*
+ * Release the page allocated for the balloon. Note that we first notify
+ * the host so it can make sure the page will be available for the guest
+ * to use, if needed.
+ */
+static int vmballoon_release_page(struct vmballoon *b, struct page *page)
+{
+ if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
+ return -EIO;
+
+ list_del(&page->lru);
+
+ /* deallocate page */
+ __free_page(page);
+ STATS_INC(b->stats.free);
+
+ /* update balloon size */
+ b->size--;
+
+ return 0;
+}
+
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
+ list_del(&page->lru);
+ __free_page(page);
+ STATS_INC(b->stats.refused_free);
+ }
+
+ b->n_refused_pages = 0;
+}
+
+/*
+ * Inflate the balloon towards its target size. Note that we try to limit
+ * the rate of allocation to make sure we are not choking the rest of the
+ * system.
+ */
+static void vmballoon_inflate(struct vmballoon *b)
+{
+ unsigned int goal;
+ unsigned int rate;
+ unsigned int i;
+ unsigned int allocations = 0;
+ int error = 0;
+ bool alloc_can_sleep = false;
+
+ pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+ /*
+ * First try NOSLEEP page allocations to inflate balloon.
+ *
+ * If we do not throttle nosleep allocations, we can drain all
+ * free pages in the guest quickly (if the balloon target is high).
+ * As a side-effect, draining free pages helps to inform (force)
+ * the guest to start swapping if balloon target is not met yet,
+ * which is a desired behavior. However, balloon driver can consume
+ * all available CPU cycles if too many pages are allocated in a
+ * second. Therefore, we throttle nosleep allocations even when
+ * the guest is not under memory pressure. OTOH, if we have already
+ * predicted that the guest is under memory pressure, then we
+ * slowdown page allocations considerably.
+ */
+
+ goal = b->target - b->size;
+ /*
+ * Start with the no-sleep allocation rate, which may be higher
+ * than the sleeping allocation rate.
+ */
+ rate = b->slow_allocation_cycles ?
+ b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
+
+ pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
+ __func__, goal, rate, b->rate_alloc);
+
+ for (i = 0; i < goal; i++) {
+
+ error = vmballoon_reserve_page(b, alloc_can_sleep);
+ if (error) {
+ if (error != -ENOMEM) {
+ /*
+ * Not a page allocation failure, stop this
+ * cycle. Maybe we'll get new target from
+ * the host soon.
+ */
+ break;
+ }
+
+ if (alloc_can_sleep) {
+ /*
+ * CANSLEEP page allocation failed, so guest
+ * is under severe memory pressure. Quickly
+ * decrease allocation rate.
+ */
+ b->rate_alloc = max(b->rate_alloc / 2,
+ VMW_BALLOON_RATE_ALLOC_MIN);
+ break;
+ }
+
+ /*
+ * NOSLEEP page allocation failed, so the guest is
+ * under memory pressure. Let us slow down page
+ * allocations for next few cycles so that the guest
+ * gets out of memory pressure. Also, if we already
+ * allocated b->rate_alloc pages, let's pause,
+ * otherwise switch to sleeping allocations.
+ */
+ b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
+
+ if (i >= b->rate_alloc)
+ break;
+
+ alloc_can_sleep = true;
+ /* Lower rate for sleeping allocations. */
+ rate = b->rate_alloc;
+ }
+
+ if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
+ cond_resched();
+ allocations = 0;
+ }
+
+ if (i >= rate) {
+ /* We allocated enough pages, let's take a break. */
+ break;
+ }
+ }
+
+ /*
+ * We reached our goal without failures so try increasing
+ * allocation rate.
+ */
+ if (error == 0 && i >= b->rate_alloc) {
+ unsigned int mult = i / b->rate_alloc;
+
+ b->rate_alloc =
+ min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
+ VMW_BALLOON_RATE_ALLOC_MAX);
+ }
+
+ vmballoon_release_refused_pages(b);
+}
+
+/*
+ * Decrease the size of the balloon allowing guest to use more memory.
+ */
+static void vmballoon_deflate(struct vmballoon *b)
+{
+ struct page *page, *next;
+ unsigned int i = 0;
+ unsigned int goal;
+ int error;
+
+ pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+ /* limit deallocation rate */
+ goal = min(b->size - b->target, b->rate_free);
+
+ pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
+
+ /* free pages to reach target */
+ list_for_each_entry_safe(page, next, &b->pages, lru) {
+ error = vmballoon_release_page(b, page);
+ if (error) {
+ /* quickly decrease rate in case of error */
+ b->rate_free = max(b->rate_free / 2,
+ VMW_BALLOON_RATE_FREE_MIN);
+ return;
+ }
+
+ if (++i >= goal)
+ break;
+ }
+
+ /* slowly increase rate if there were no errors */
+ b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
+ VMW_BALLOON_RATE_FREE_MAX);
+}
+
+/*
+ * Balloon work function: reset protocol, if needed, get the new size and
+ * adjust balloon as needed. Repeat in 1 sec.
+ */
+static void vmballoon_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
+ unsigned int target;
+
+ STATS_INC(b->stats.timer);
+
+ if (b->reset_required)
+ vmballoon_reset(b);
+
+ if (b->slow_allocation_cycles > 0)
+ b->slow_allocation_cycles--;
+
+ if (vmballoon_send_get_target(b, &target)) {
+ /* update target, adjust size */
+ b->target = target;
+
+ if (b->size < target)
+ vmballoon_inflate(b);
+ else if (b->size > target)
+ vmballoon_deflate(b);
+ }
+
+ /*
+ * We are using a freezable workqueue so that balloon operations are
+ * stopped while the system transitions to/from sleep/hibernation.
+ */
+ queue_delayed_work(system_freezable_wq,
+ dwork, round_jiffies_relative(HZ));
+}
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+static int vmballoon_debug_show(struct seq_file *f, void *offset)
+{
+ struct vmballoon *b = f->private;
+ struct vmballoon_stats *stats = &b->stats;
+
+ /* format size info */
+ seq_printf(f,
+ "target: %8d pages\n"
+ "current: %8d pages\n",
+ b->target, b->size);
+
+ /* format rate info */
+ seq_printf(f,
+ "rateNoSleepAlloc: %8d pages/sec\n"
+ "rateSleepAlloc: %8d pages/sec\n"
+ "rateFree: %8d pages/sec\n",
+ VMW_BALLOON_NOSLEEP_ALLOC_MAX,
+ b->rate_alloc, b->rate_free);
+
+ seq_printf(f,
+ "\n"
+ "timer: %8u\n"
+ "start: %8u (%4u failed)\n"
+ "guestType: %8u (%4u failed)\n"
+ "lock: %8u (%4u failed)\n"
+ "unlock: %8u (%4u failed)\n"
+ "target: %8u (%4u failed)\n"
+ "primNoSleepAlloc: %8u (%4u failed)\n"
+ "primCanSleepAlloc: %8u (%4u failed)\n"
+ "primFree: %8u\n"
+ "errAlloc: %8u\n"
+ "errFree: %8u\n",
+ stats->timer,
+ stats->start, stats->start_fail,
+ stats->guest_type, stats->guest_type_fail,
+ stats->lock, stats->lock_fail,
+ stats->unlock, stats->unlock_fail,
+ stats->target, stats->target_fail,
+ stats->alloc, stats->alloc_fail,
+ stats->sleep_alloc, stats->sleep_alloc_fail,
+ stats->free,
+ stats->refused_alloc, stats->refused_free);
+
+ return 0;
+}
+
+static int vmballoon_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vmballoon_debug_show, inode->i_private);
+}
+
+static const struct file_operations vmballoon_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = vmballoon_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init vmballoon_debugfs_init(struct vmballoon *b)
+{
+ int error;
+
+ b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
+ &vmballoon_debug_fops);
+ if (IS_ERR(b->dbg_entry)) {
+ error = PTR_ERR(b->dbg_entry);
+ pr_err("failed to create debugfs entry, error: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
+{
+ debugfs_remove(b->dbg_entry);
+}
+
+#else
+
+static inline int vmballoon_debugfs_init(struct vmballoon *b)
+{
+ return 0;
+}
+
+static inline void vmballoon_debugfs_exit(struct vmballoon *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __init vmballoon_init(void)
+{
+ int error;
+
+ /*
+ * Check if we are running on VMware's hypervisor and bail out
+ * if we are not.
+ */
+ if (x86_hyper != &x86_hyper_vmware)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&balloon.pages);
+ INIT_LIST_HEAD(&balloon.refused_pages);
+
+ /* initialize rates */
+ balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
+ balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
+
+ INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
+
+ /*
+ * Start balloon.
+ */
+ if (!vmballoon_send_start(&balloon)) {
+ pr_err("failed to send start command to the host\n");
+ return -EIO;
+ }
+
+ if (!vmballoon_send_guest_id(&balloon)) {
+ pr_err("failed to send guest ID to the host\n");
+ return -EIO;
+ }
+
+ error = vmballoon_debugfs_init(&balloon);
+ if (error)
+ return error;
+
+ queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
+
+ return 0;
+}
+module_init(vmballoon_init);
+
+static void __exit vmballoon_exit(void)
+{
+ cancel_delayed_work_sync(&balloon.dwork);
+
+ vmballoon_debugfs_exit(&balloon);
+
+ /*
+ * Deallocate all reserved memory, and reset connection with monitor.
+ * Reset connection before deallocating memory to avoid potential for
+ * additional spurious resets from guest touching deallocated pages.
+ */
+ vmballoon_send_start(&balloon);
+ vmballoon_pop(&balloon);
+}
+module_exit(vmballoon_exit);
diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c
new file mode 100644
index 00000000..f5b95152
--- /dev/null
+++ b/drivers/misc/wl127x-rfkill.c
@@ -0,0 +1,121 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/wl127x-rfkill.h>
+
+static int wl127x_rfkill_set_power(void *data, enum rfkill_state state)
+{
+ int nshutdown_gpio = (int) data;
+
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ gpio_set_value(nshutdown_gpio, 1);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ gpio_set_value(nshutdown_gpio, 0);
+ break;
+ default:
+ printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state);
+ }
+ return 0;
+}
+
+static int wl127x_rfkill_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+ enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED; /* off */
+
+ rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio");
+ if (unlikely(rc))
+ return rc;
+
+ rc = gpio_direction_output(pdata->nshutdown_gpio, 0);
+ if (unlikely(rc))
+ return rc;
+
+ rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state);
+ wl127x_rfkill_set_power(NULL, default_state);
+
+ pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+ if (unlikely(!pdata->rfkill))
+ return -ENOMEM;
+
+ pdata->rfkill->name = "wl127x";
+ pdata->rfkill->state = default_state;
+ /* userspace cannot take exclusive control */
+ pdata->rfkill->user_claim_unsupported = 1;
+ pdata->rfkill->user_claim = 0;
+ pdata->rfkill->data = (void *) pdata->nshutdown_gpio;
+ pdata->rfkill->toggle_radio = wl127x_rfkill_set_power;
+
+ rc = rfkill_register(pdata->rfkill);
+ if (unlikely(rc))
+ rfkill_free(pdata->rfkill);
+
+ return rc;
+}
+
+static int wl127x_rfkill_remove(struct platform_device *pdev)
+{
+ struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+
+ rfkill_unregister(pdata->rfkill);
+ rfkill_free(pdata->rfkill);
+ gpio_free(pdata->nshutdown_gpio);
+
+ return 0;
+}
+
+static struct platform_driver wl127x_rfkill_platform_driver = {
+ .probe = wl127x_rfkill_probe,
+ .remove = wl127x_rfkill_remove,
+ .driver = {
+ .name = "wl127x-rfkill",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wl127x_rfkill_init(void)
+{
+ return platform_driver_register(&wl127x_rfkill_platform_driver);
+}
+
+static void __exit wl127x_rfkill_exit(void)
+{
+ platform_driver_unregister(&wl127x_rfkill_platform_driver);
+}
+
+module_init(wl127x_rfkill_init);
+module_exit(wl127x_rfkill_exit);
+
+MODULE_ALIAS("platform:wl127x");
+MODULE_DESCRIPTION("wl127x-rfkill");
+MODULE_AUTHOR("Motorola");
+MODULE_LICENSE("GPL");