[PATCH] spufs: The SPU file system, base
author     Arnd Bergmann <arnd@arndb.de>
           Tue, 15 Nov 2005 20:53:48 +0000 (15:53 -0500)
committer  Paul Mackerras <paulus@samba.org>
           Mon, 9 Jan 2006 03:49:12 +0000 (14:49 +1100)
This is the current version of the spu file system, used
for driving SPEs on the Cell Broadband Engine.

This release is almost identical to the version for the
2.6.14 kernel posted earlier, which is available as part
of the Cell BE Linux distribution from
http://www.bsc.es/projects/deepcomputing/linuxoncell/.

The first patch provides all the interfaces for running
SPU applications, but does not have any support for
debugging SPU tasks or for scheduling. Both of these
features are added in the subsequent patches.

See Documentation/filesystems/spufs.txt for details on
how to use spufs.

Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
20 files changed:
Documentation/filesystems/spufs.txt [new file with mode: 0644]
arch/powerpc/Kconfig
arch/powerpc/kernel/systbl.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/platforms/cell/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/cell/Makefile
arch/powerpc/platforms/cell/spu_base.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spu_syscalls.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/Makefile [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/context.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/file.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/inode.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/spufs.h [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/syscalls.c [new file with mode: 0644]
arch/ppc/kernel/ppc_ksyms.c
include/asm-powerpc/spu.h [new file with mode: 0644]
include/asm-powerpc/unistd.h
include/linux/syscalls.h
kernel/sys_ni.c
mm/memory.c

diff --git a/Documentation/filesystems/spufs.txt b/Documentation/filesystems/spufs.txt
new file mode 100644 (file)
index 0000000..8edc395
--- /dev/null
@@ -0,0 +1,521 @@
+SPUFS(2)                   Linux Programmer's Manual                  SPUFS(2)
+
+
+
+NAME
+       spufs - the SPU file system
+
+
+DESCRIPTION
+       The SPU file system is used on PowerPC machines that implement the Cell
+       Broadband Engine Architecture in order to access Synergistic  Processor
+       Units (SPUs).
+
+       The file system provides a name space similar to POSIX shared memory or
+       message queues. Users that have write permissions on  the  file  system
+       can use spu_create(2) to establish SPU contexts in the spufs root.
+
+       Every SPU context is represented by a directory containing a predefined
+       set of files. These files can be used for manipulating the state of the
+       logical SPU. Users can change permissions on those files, but not actu-
+       ally add or remove files.
+
+
+MOUNT OPTIONS
+       uid=<uid>
+              set the user owning the mount point, the default is 0 (root).
+
+       gid=<gid>
+              set the group owning the mount point, the default is 0 (root).
+
+
+FILES
+       The files in spufs mostly follow the standard behavior for regular sys-
+       tem  calls like read(2) or write(2), but often support only a subset of
+       the operations supported on regular file systems. This list details the
+       supported  operations  and  the  deviations  from  the behaviour in the
+       respective man pages.
+
+       All files that support the read(2) operation also support readv(2)  and
+       all  files  that support the write(2) operation also support writev(2).
+       All files support the access(2) and stat(2) family of  operations,  but
+       only  the  st_mode,  st_nlink,  st_uid and st_gid fields of struct stat
+       contain reliable information.
+
+       All files support the chmod(2)/fchmod(2) and chown(2)/fchown(2)  opera-
+       tions,  but  will  not be able to grant permissions that contradict the
+       possible operations, e.g. read access on the wbox file.
+
+       The current set of files is:
+
+
+   /mem
+       the contents of the local storage memory  of  the  SPU.   This  can  be
+       accessed  like  a regular shared memory file and contains both code and
+       data in the address space of the SPU.  The possible  operations  on  an
+       open mem file are:
+
+       read(2), pread(2), write(2), pwrite(2), lseek(2)
+              These operate as documented, with the exception that lseek(2),
+              write(2) and pwrite(2) are not supported beyond the end of the
+              file. The file size is the size of the local storage of the SPU,
+              which normally is 256 kilobytes.
+
+       mmap(2)
+              Mapping mem into the process address space gives access  to  the
+              SPU  local  storage  within  the  process  address  space.  Only
+              MAP_SHARED mappings are allowed.
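+
+       As an illustration, a minimal sketch of mapping the local store of a
+       context that was created at the hypothetical path /spu/ctx; error
+       handling is omitted:
+
+              #include <fcntl.h>
+              #include <stddef.h>
+              #include <sys/mman.h>
+
+              int main(void)
+              {
+                      int fd = open("/spu/ctx/mem", O_RDWR);
+                      /* map all 256 KB of local store, MAP_SHARED only */
+                      void *ls = mmap(NULL, 256 * 1024,
+                                      PROT_READ | PROT_WRITE,
+                                      MAP_SHARED, fd, 0);
+
+                      /* ... copy SPU code and data through ls ... */
+                      munmap(ls, 256 * 1024);
+                      return 0;
+              }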
+
+
+   /mbox
+       The first SPU to CPU communication mailbox. This file is read-only  and
+       can be read in units of 32 bits.  The file can only be used in non-
+       blocking mode; even poll() will not block on it.  The possible
+       operations on an open mbox file are:
+
+       read(2)
+              If  a  count smaller than four is requested, read returns -1 and
+              sets errno to EINVAL.  If there is no data available in the mail
+              box,  the  return  value  is set to -1 and errno becomes EAGAIN.
+              When data has been read successfully, four bytes are  placed  in
+              the data buffer and the value four is returned.
+
+
+   /ibox
+       The  second  SPU  to CPU communication mailbox. This file is similar to
+       the first mailbox file, but can be read in blocking I/O mode,  and  the
+       poll family of system calls can be used to wait for it.  The possible
+       operations on an open ibox file are:
+
+       read(2)
+              If a count smaller than four is requested, read returns  -1  and
+              sets errno to EINVAL.  If there is no data available in the mail
+              box and the file descriptor has been opened with O_NONBLOCK, the
+              return value is set to -1 and errno becomes EAGAIN.
+
+              If  there  is  no  data  available  in the mail box and the file
+              descriptor has been opened without  O_NONBLOCK,  the  call  will
+              block  until  the  SPU  writes to its interrupt mailbox channel.
+              When data has been read successfully, four bytes are  placed  in
+              the data buffer and the value four is returned.
+
+       poll(2)
+              Poll  on  the  ibox  file returns (POLLIN | POLLRDNORM) whenever
+              data is available for reading.
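+
+       A sketch that uses poll to wait until the SPU has written to its
+       interrupt mailbox and then reads one word, assuming a context at the
+       hypothetical path /spu/ctx; error checking is omitted:
+
+              #include <fcntl.h>
+              #include <poll.h>
+              #include <unistd.h>
+
+              int main(void)
+              {
+                      unsigned int data;
+                      struct pollfd pfd;
+
+                      pfd.fd = open("/spu/ctx/ibox", O_RDONLY);
+                      pfd.events = POLLIN;
+
+                      /* wait until a mailbox entry is available */
+                      poll(&pfd, 1, -1);
+                      /* a successful read always transfers four bytes */
+                      read(pfd.fd, &data, 4);
+                      return 0;
+              }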
+
+
+   /wbox
+       The CPU to SPU communication mailbox. It is write-only and can be
+       written in units of 32 bits. If the mailbox is full, write() will
+       block, and poll can be used to wait for it becoming empty again.
+       The possible operations on an open wbox file are:
+
+       write(2)
+              If a count smaller than four is requested, write returns -1
+              and sets errno to EINVAL.  If there is no space available in
+              the mail box and the file descriptor has been opened with
+              O_NONBLOCK, the return value is set to -1 and errno becomes
+              EAGAIN.
+
+              If there is no space available in the mail box and the file
+              descriptor has been opened without O_NONBLOCK, the call will
+              block until the SPU reads from its PPE mailbox channel.  When
+              data has been written successfully, four bytes are copied from
+              the data buffer and the value four is returned.
+
+       poll(2)
+              Poll on the wbox file returns (POLLOUT | POLLWRNORM) whenever
+              space is available for writing.
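+
+       A sketch of a non-blocking write that uses poll to wait for free
+       space when the mailbox queue is full; the path and value are only
+       illustrative:
+
+              #include <errno.h>
+              #include <fcntl.h>
+              #include <poll.h>
+              #include <unistd.h>
+
+              int main(void)
+              {
+                      unsigned int word = 0x1234;
+                      int fd = open("/spu/ctx/wbox", O_WRONLY | O_NONBLOCK);
+                      struct pollfd pfd = { .fd = fd, .events = POLLOUT };
+
+                      /* retry after waiting whenever the queue is full */
+                      while (write(fd, &word, 4) < 0 && errno == EAGAIN)
+                              poll(&pfd, 1, -1);
+                      return 0;
+              }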
+
+
+   /mbox_stat
+   /ibox_stat
+   /wbox_stat
+       Read-only files that contain the length of the current queue, i.e.  how
+       many  words  can  be  read  from  mbox or ibox or how many words can be
+       written to wbox without blocking.  The files can be read only in 4-byte
+       units  and  return  a  big-endian  binary integer number.  The possible
+       operations on an open *box_stat file are:
+
+       read(2)
+              If a count smaller than four is requested, read returns  -1  and
+              sets errno to EINVAL.  Otherwise, a four byte value is placed in
+              the data buffer, containing the number of elements that  can  be
+              read  from  (for  mbox_stat  and  ibox_stat)  or written to (for
+              wbox_stat) the respective mail box without blocking or resulting
+              in EAGAIN.
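+
+       For example, the amount of free space in the write queue can be
+       queried before writing (hypothetical path, no error handling):
+
+              #include <fcntl.h>
+              #include <unistd.h>
+
+              int main(void)
+              {
+                      unsigned int free_slots;
+                      int fd = open("/spu/ctx/wbox_stat", O_RDONLY);
+
+                      /*
+                       * number of words writable to wbox without blocking;
+                       * the value is big-endian, i.e. host order on PowerPC
+                       */
+                      read(fd, &free_slots, 4);
+                      return 0;
+              }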
+
+
+   /npc
+   /decr
+   /decr_status
+   /spu_tag_mask
+   /event_mask
+   /srr0
+       Internal registers of the SPU. The representation is an ASCII string
+       with the numeric value of the respective register. These can be used
+       in read/write mode for debugging, but normal operation of programs
+       programs should not rely on them because access to any of  them  except
+       npc requires an SPU context save and is therefore very inefficient.
+
+       The contents of these files are:
+
+       npc                 Next Program Counter
+
+       decr                SPU Decrementer
+
+       decr_status         Decrementer Status
+
+       spu_tag_mask        MFC tag mask for SPU DMA
+
+       event_mask          Event mask for SPU interrupts
+
+       srr0                Interrupt Return address register
+
+
+       The   possible   operations   on   an   open  npc,  decr,  decr_status,
+       spu_tag_mask, event_mask or srr0 file are:
+
+       read(2)
+              When the count supplied to the read call  is  shorter  than  the
+              required  length for the pointer value plus a newline character,
+              subsequent reads from the same file descriptor  will  result  in
+              completing  the string, regardless of changes to the register by
+              a running SPU task.  When a complete string has been  read,  all
+              subsequent read operations will return zero bytes and a new file
+              descriptor needs to be opened to read the value again.
+
+       write(2)
+              A write operation on the file results in setting the register to
+              the  value  given  in  the string. The string is parsed from the
+              beginning to the first non-numeric character or the end  of  the
+              buffer.  Subsequent writes to the same file descriptor overwrite
+              the previous setting.
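+
+       A sketch of setting and reading back the next program counter through
+       the npc file; the path is hypothetical and error handling is omitted:
+
+              #include <fcntl.h>
+              #include <stdio.h>
+              #include <unistd.h>
+
+              int main(void)
+              {
+                      char buf[32];
+                      ssize_t len;
+                      int fd;
+
+                      /* the register value is passed as an ASCII string */
+                      fd = open("/spu/ctx/npc", O_WRONLY);
+                      write(fd, "0\n", 2);
+                      close(fd);
+
+                      /* reading the value back needs a fresh descriptor */
+                      fd = open("/spu/ctx/npc", O_RDONLY);
+                      len = read(fd, buf, sizeof(buf) - 1);
+                      if (len > 0) {
+                              buf[len] = '\0';
+                              printf("npc = %s", buf);
+                      }
+                      close(fd);
+                      return 0;
+              }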
+
+
+   /fpcr
+       This file gives access to the Floating Point Status and Control  Regis-
+       ter as a four byte long file. The operations on the fpcr file are:
+
+       read(2)
+              If  a  count smaller than four is requested, read returns -1 and
+              sets errno to EINVAL.  Otherwise, a four byte value is placed in
+              the data buffer, containing the current value of the fpcr regis-
+              ter.
+
+       write(2)
+              If a count smaller than four is requested, write returns -1  and
+              sets  errno  to  EINVAL.  Otherwise, a four byte value is copied
+              from the data buffer, updating the value of the fpcr register.
+
+
+   /signal1
+   /signal2
+       The two signal notification channels of an SPU.  These  are  read-write
+       files  that  operate  on  a 32 bit word.  Writing to one of these files
+       triggers an interrupt on the SPU. The value written to the signal
+       files can be read from the SPU through a channel read or from host user
+       space through the file.  After the value has been read by the  SPU,  it
+       is  reset  to zero.  The possible operations on an open signal1 or sig-
+       nal2 file are:
+
+       read(2)
+              If a count smaller than four is requested, read returns  -1  and
+              sets errno to EINVAL.  Otherwise, a four byte value is placed in
+              the data buffer, containing the current value of  the  specified
+              signal notification register.
+
+       write(2)
+              If  a count smaller than four is requested, write returns -1 and
+              sets errno to EINVAL.  Otherwise, a four byte  value  is  copied
+              from the data buffer, updating the value of the specified signal
+              notification register.  The signal notification register will
+              either be replaced with the input data or updated to the
+              bitwise OR of the old value and the input data, depending on
+              the contents of the signal1_type or signal2_type file,
+              respectively.
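+
+       A sketch of raising a signal from the PPE side by writing a single
+       32-bit word; whether it overwrites or is OR'ed into the register
+       depends on the signal1_type file described below (hypothetical path,
+       no error handling):
+
+              #include <fcntl.h>
+              #include <unistd.h>
+
+              int main(void)
+              {
+                      unsigned int bits = 0x1;
+                      int fd = open("/spu/ctx/signal1", O_WRONLY);
+
+                      /* becomes readable on the SPU's signal channel */
+                      write(fd, &bits, 4);
+                      return 0;
+              }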
+
+
+   /signal1_type
+   /signal2_type
+       These two files change the behavior of the signal1 and signal2  notifi-
+       cation files.  They contain a numerical ASCII string which is read
+       as either "1" or "0".  In mode 0 (overwrite), the hardware replaces
+       the contents of the signal channel with the data written to it.  In
+       mode 1 (logical OR), the hardware accumulates the bits that are subse-
+       quently written to it.  The possible operations on an open signal1_type
+       or signal2_type file are:
+
+       read(2)
+              When the count supplied to the read call  is  shorter  than  the
+              required  length  for the digit plus a newline character, subse-
+              quent reads from the same file descriptor will  result  in  com-
+              pleting  the  string.  When a complete string has been read, all
+              subsequent read operations will return zero bytes and a new file
+              descriptor needs to be opened to read the value again.
+
+       write(2)
+              A write operation on the file results in setting the register to
+              the value given in the string. The string  is  parsed  from  the
+              beginning  to  the first non-numeric character or the end of the
+              buffer.  Subsequent writes to the same file descriptor overwrite
+              the previous setting.
+
+
+EXAMPLES
+       /etc/fstab entry
+              none      /spu      spufs     gid=spu   0    0
+
+
+AUTHORS
+       Arnd  Bergmann  <arndb@de.ibm.com>,  Mark  Nutter <mnutter@us.ibm.com>,
+       Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
+
+SEE ALSO
+       capabilities(7), close(2), spu_create(2), spu_run(2), spufs(7)
+
+
+
+Linux                             2005-09-28                          SPUFS(2)
+
+------------------------------------------------------------------------------
+
+SPU_RUN(2)                 Linux Programmer's Manual                SPU_RUN(2)
+
+
+
+NAME
+       spu_run - execute an spu context
+
+
+SYNOPSIS
+       #include <sys/spu.h>
+
+       int spu_run(int fd, unsigned int *npc, unsigned int *event);
+
+DESCRIPTION
+       The  spu_run system call is used on PowerPC machines that implement the
+       Cell Broadband Engine Architecture in order to access Synergistic  Pro-
+       cessor  Units  (SPUs).  It  uses the fd that was returned from spu_cre-
+       ate(2) to address a specific SPU context. When the context gets  sched-
+       uled  to a physical SPU, it starts execution at the instruction pointer
+       passed in npc.
+
+       Execution of SPU code happens synchronously, meaning that spu_run  does
+       not  return  while the SPU is still running. If there is a need to exe-
+       cute SPU code in parallel with other code on either  the  main  CPU  or
+       other  SPUs,  you  need to create a new thread of execution first, e.g.
+       using the pthread_create(3) call.
+
+       When spu_run returns, the current value of the SPU instruction  pointer
+       is  written back to npc, so you can call spu_run again without updating
+       the pointers.
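+
+       As an illustrative sketch, the following function has the signature
+       expected by pthread_create(3) and runs one context until its next
+       stop, assuming the <sys/spu.h> wrapper from the SYNOPSIS and a file
+       descriptor obtained from spu_create(2):
+
+              #include <stdio.h>
+              #include <sys/spu.h>
+
+              static void *run_context(void *arg)
+              {
+                      int fd = *(int *)arg;      /* from spu_create(2) */
+                      unsigned int npc = 0;      /* entry point */
+                      unsigned int event = 0;
+                      int status;
+
+                      /* blocks until the SPU stops; npc is written back */
+                      status = spu_run(fd, &npc, &event);
+                      if (status < 0)
+                              perror("spu_run");
+                      else
+                              printf("status 0x%x, npc 0x%x\n", status, npc);
+                      return NULL;
+              }
+
+       The main thread can start this with pthread_create(3) and keep doing
+       work on the PPE while the SPU context runs.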
+
+       event can be a NULL pointer or point to an extended  status  code  that
+       gets  filled  when spu_run returns. It can be one of the following con-
+       stants:
+
+       SPE_EVENT_DMA_ALIGNMENT
+              A DMA alignment error
+
+       SPE_EVENT_SPE_DATA_SEGMENT
+              A DMA segmentation error
+
+       SPE_EVENT_SPE_DATA_STORAGE
+              A DMA storage error
+
+       If NULL is passed as the event argument, these errors will result in  a
+       signal delivered to the calling process.
+
+RETURN VALUE
+       spu_run  returns the value of the spu_status register or -1 to indicate
+       an error and set errno to one of the error  codes  listed  below.   The
+       spu_status  register  value  contains  a  bit  mask of status codes and
+       optionally a 14 bit code returned from the stop-and-signal  instruction
+       on the SPU. The bit masks for the status codes are:
+
+       0x02   SPU was stopped by stop-and-signal.
+
+       0x04   SPU was stopped by halt.
+
+       0x08   SPU is waiting for a channel.
+
+       0x10   SPU is in single-step mode.
+
+       0x20   SPU has tried to execute an invalid instruction.
+
+       0x40   SPU has tried to access an invalid channel.
+
+       0x3fff0000
+              The  bits  masked with this value contain the code returned from
+              stop-and-signal.
+
+       At least one of the lower eight bits is always set, or an error code
+       is returned from spu_run.
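+
+       A sketch of decoding a non-negative return value with the bit masks
+       listed above; the masks are written out literally since no header
+       with symbolic names is assumed here:
+
+              #include <stdio.h>
+
+              static void decode_status(int status)
+              {
+                      if (status & 0x02)
+                              printf("stop-and-signal, code 0x%x\n",
+                                     (status & 0x3fff0000) >> 16);
+                      if (status & 0x04)
+                              printf("stopped by halt\n");
+                      if (status & 0x20)
+                              printf("invalid instruction\n");
+                      if (status & 0x40)
+                              printf("invalid channel access\n");
+              }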
+
+ERRORS
+       EAGAIN or EWOULDBLOCK
+              fd is in non-blocking mode and spu_run would block.
+
+       EBADF  fd is not a valid file descriptor.
+
+       EFAULT npc is not a valid pointer, or event is neither NULL nor a
+              valid pointer.
+
+       EINTR  A signal occurred while spu_run was in progress.  The npc value
+              has  been updated to the new program counter value if necessary.
+
+       EINVAL fd is not a file descriptor returned from spu_create(2).
+
+       ENOMEM Insufficient memory was available to handle a page fault result-
+              ing from an MFC direct memory access.
+
+       ENOSYS The functionality is not provided by the current system, because
+              either the hardware does not provide SPUs or the spufs module is
+              not loaded.
+
+
+NOTES
+       spu_run  is  meant  to  be  used  from  libraries that implement a more
+       abstract interface to SPUs, not to be used from  regular  applications.
+       See  http://www.bsc.es/projects/deepcomputing/linuxoncell/ for the rec-
+       ommended libraries.
+
+
+CONFORMING TO
+       This call is Linux specific and only implemented by the ppc64 architec-
+       ture. Programs using this system call are not portable.
+
+
+BUGS
+       The code does not yet fully implement all features outlined here.
+
+
+AUTHOR
+       Arnd Bergmann <arndb@de.ibm.com>
+
+SEE ALSO
+       capabilities(7), close(2), spu_create(2), spufs(7)
+
+
+
+Linux                             2005-09-28                        SPU_RUN(2)
+
+------------------------------------------------------------------------------
+
+SPU_CREATE(2)              Linux Programmer's Manual             SPU_CREATE(2)
+
+
+
+NAME
+       spu_create - create a new spu context
+
+
+SYNOPSIS
+       #include <sys/types.h>
+       #include <sys/spu.h>
+
+       int spu_create(const char *pathname, int flags, mode_t mode);
+
+DESCRIPTION
+       The  spu_create  system call is used on PowerPC machines that implement
+       the Cell Broadband Engine Architecture in order to  access  Synergistic
+       Processor  Units (SPUs). It creates a new logical context for an SPU in
+       pathname and returns a handle associated with it.  pathname must
+       point to a non-existing directory in the mount point of the SPU file
+       system (spufs).  When spu_create is successful, a directory gets
+       created at pathname and it is populated with files.
+
+       The returned file handle can only be passed to spu_run(2) or closed;
+       other operations are not defined on it. When it is closed, all associ-
+       ated  directory entries in spufs are removed. When the last file handle
+       pointing either inside  of  the  context  directory  or  to  this  file
+       descriptor is closed, the logical SPU context is destroyed.
+
+       The  parameter flags can be zero or any bitwise or'd combination of the
+       following constants:
+
+       SPU_RAWIO
+              Allow mapping of some of the hardware registers of the SPU  into
+              user space. This flag requires the CAP_SYS_RAWIO capability, see
+              capabilities(7).
+
+       The mode parameter specifies the permissions used for creating the  new
+       directory  in  spufs.   mode is modified with the user's umask(2) value
+       and then used for both the directory and the files contained in it. The
+       file permissions mask out some more bits of mode because they typically
+       support only read or write access. See stat(2) for a full list  of  the
+       possible mode values.
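+
+       A sketch of creating a context beneath the conventional /spu mount
+       point, assuming the <sys/spu.h> wrapper from the SYNOPSIS; the direc-
+       tory name is arbitrary:
+
+              #include <stdio.h>
+              #include <sys/types.h>
+              #include <sys/spu.h>
+
+              int main(void)
+              {
+                      /*
+                       * 0755 is combined with the umask and applied to
+                       * the new directory and the files inside it
+                       */
+                      int fd = spu_create("/spu/my_context", 0, 0755);
+
+                      if (fd < 0) {
+                              perror("spu_create");
+                              return 1;
+                      }
+                      /* fd may only be passed to spu_run(2) or closed */
+                      return 0;
+              }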
+
+
+RETURN VALUE
+       spu_create  returns a new file descriptor. It may return -1 to indicate
+       an error condition and set errno to  one  of  the  error  codes  listed
+       below.
+
+
+ERRORS
+       EACCES
+              The  current  user does not have write access on the spufs mount
+              point.
+
+       EEXIST An SPU context already exists at the given path name.
+
+       EFAULT pathname is not a valid string pointer in  the  current  address
+              space.
+
+       EINVAL pathname is not a directory in the spufs mount point.
+
+       ELOOP  Too many symlinks were found while resolving pathname.
+
+       EMFILE The process has reached its maximum open file limit.
+
+       ENAMETOOLONG
+              pathname was too long.
+
+       ENFILE The system has reached the global open file limit.
+
+       ENOENT Part of pathname could not be resolved.
+
+       ENOMEM The kernel could not allocate all resources required.
+
+       ENOSPC There  are  not  enough  SPU resources available to create a new
+              context or the user specific limit for the number  of  SPU  con-
+              texts has been reached.
+
+       ENOSYS The functionality is not provided by the current system, because
+              either the hardware does not provide SPUs or the spufs module is
+              not loaded.
+
+       ENOTDIR
+              A part of pathname is not a directory.
+
+
+
+NOTES
+       spu_create  is  meant  to  be used from libraries that implement a more
+       abstract interface to SPUs, not to be used from  regular  applications.
+       See  http://www.bsc.es/projects/deepcomputing/linuxoncell/ for the rec-
+       ommended libraries.
+
+
+FILES
+       pathname must point to a location beneath the mount point of spufs.  By
+       convention, it gets mounted in /spu.
+
+
+CONFORMING TO
+       This call is Linux specific and only implemented by the ppc64 architec-
+       ture. Programs using this system call are not portable.
+
+
+BUGS
+       The code does not yet fully implement all features outlined here.
+
+
+AUTHOR
+       Arnd Bergmann <arndb@de.ibm.com>
+
+SEE ALSO
+       capabilities(7), close(2), spu_run(2), spufs(7)
+
+
+
+Linux                             2005-09-28                     SPU_CREATE(2)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4d71aa3ecbb594d7097f054a3fdd1779f5a1a119..39ca7b9da3697e22e13f61e7627e66db40b9b062 100644 (file)
@@ -482,6 +482,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
 source arch/powerpc/platforms/4xx/Kconfig
 source arch/powerpc/platforms/85xx/Kconfig
 source arch/powerpc/platforms/8xx/Kconfig
+source arch/powerpc/platforms/cell/Kconfig
 
 menu "Kernel options"
 
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 4bb3650420b48c4550dc005a7ac2a08f6c05948c..989f6286991a9a7aeb0d105b8dbca7d059101a3f 100644 (file)
@@ -319,3 +319,5 @@ COMPAT_SYS(ioprio_get)
 SYSCALL(inotify_init)
 SYSCALL(inotify_add_watch)
 SYSCALL(inotify_rm_watch)
+SYSCALL(spu_run)
+SYSCALL(spu_create)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504678bd5aa318d3af992c4a117d96e41c66..846a1894cf95ff63259f8e5c431f3709af6261c5 100644 (file)
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
 }
+EXPORT_SYMBOL_GPL(hash_page);
 
 void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
new file mode 100644 (file)
index 0000000..3157071
--- /dev/null
@@ -0,0 +1,13 @@
+menu "Cell Broadband Engine options"
+       depends on PPC_CELL
+
+config SPU_FS
+       tristate "SPU file system"
+       default m
+       depends on PPC_CELL
+       help
+         The SPU file system is used to access Synergistic Processing
+         Units on machines implementing the Broadband Processor
+         Architecture.
+
+endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 55e094b96bc0bbe8e1c1d3914762a3928e00371d..74616cf13af9840b37b71902f33d30b7e37afe34 100644 (file)
@@ -1,2 +1,5 @@
 obj-y                  += interrupt.o iommu.o setup.o spider-pic.o
 obj-$(CONFIG_SMP)      += smp.o
+obj-$(CONFIG_SPU_FS)   += spufs/ spu_base.o
+builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o
+obj-y                  += $(builtin-spufs-m)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
new file mode 100644 (file)
index 0000000..9e90965
--- /dev/null
@@ -0,0 +1,740 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define DEBUG 1
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/mmu_context.h>
+
+#include "interrupt.h"
+
+static int __spu_trap_invalid_dma(struct spu *spu)
+{
+       pr_debug("%s\n", __FUNCTION__);
+       force_sig(SIGBUS, /* info, */ current);
+       return 0;
+}
+
+static int __spu_trap_dma_align(struct spu *spu)
+{
+       pr_debug("%s\n", __FUNCTION__);
+       force_sig(SIGBUS, /* info, */ current);
+       return 0;
+}
+
+static int __spu_trap_error(struct spu *spu)
+{
+       pr_debug("%s\n", __FUNCTION__);
+       force_sig(SIGILL, /* info, */ current);
+       return 0;
+}
+
+static void spu_restart_dma(struct spu *spu)
+{
+       struct spu_priv2 __iomem *priv2 = spu->priv2;
+       out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
+static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+{
+       struct spu_priv2 __iomem *priv2;
+       struct mm_struct *mm;
+
+       pr_debug("%s\n", __FUNCTION__);
+
+       if (REGION_ID(ea) != USER_REGION_ID) {
+               pr_debug("invalid region access at %016lx\n", ea);
+               return 1;
+       }
+
+       priv2 = spu->priv2;
+       mm = spu->mm;
+
+       if (spu->slb_replace >= 8)
+               spu->slb_replace = 0;
+
+       out_be64(&priv2->slb_index_W, spu->slb_replace);
+       out_be64(&priv2->slb_vsid_RW,
+               (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)
+                                                | SLB_VSID_USER);
+       out_be64(&priv2->slb_esid_RW, (ea & ESID_MASK) | SLB_ESID_V);
+
+       spu_restart_dma(spu);
+
+       pr_debug("set slb %d context %lx, ea %016lx, vsid %016lx, esid %016lx\n",
+               spu->slb_replace, mm->context.id, ea,
+               (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)| SLB_VSID_USER,
+                (ea & ESID_MASK) | SLB_ESID_V);
+       return 0;
+}
+
+static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
+{
+       unsigned long dsisr;
+       struct spu_priv1 __iomem *priv1;
+
+       pr_debug("%s\n", __FUNCTION__);
+       priv1 = spu->priv1;
+       dsisr = in_be64(&priv1->mfc_dsisr_RW);
+
+       wake_up(&spu->stop_wq);
+
+       return 0;
+}
+
+static int __spu_trap_mailbox(struct spu *spu)
+{
+       wake_up_all(&spu->ibox_wq);
+       kill_fasync(&spu->ibox_fasync, SIGIO, POLLIN);
+
+       /* atomically disable SPU mailbox interrupts */
+       spin_lock(&spu->register_lock);
+       out_be64(&spu->priv1->int_mask_class2_RW,
+               in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
+       spin_unlock(&spu->register_lock);
+       return 0;
+}
+
+static int __spu_trap_stop(struct spu *spu)
+{
+       pr_debug("%s\n", __FUNCTION__);
+       spu->stop_code = in_be32(&spu->problem->spu_status_R);
+       wake_up(&spu->stop_wq);
+       return 0;
+}
+
+static int __spu_trap_halt(struct spu *spu)
+{
+       pr_debug("%s\n", __FUNCTION__);
+       spu->stop_code = in_be32(&spu->problem->spu_status_R);
+       wake_up(&spu->stop_wq);
+       return 0;
+}
+
+static int __spu_trap_tag_group(struct spu *spu)
+{
+       pr_debug("%s\n", __FUNCTION__);
+       /* wake_up(&spu->dma_wq); */
+       return 0;
+}
+
+static int __spu_trap_spubox(struct spu *spu)
+{
+       wake_up_all(&spu->wbox_wq);
+       kill_fasync(&spu->wbox_fasync, SIGIO, POLLOUT);
+
+       /* atomically disable SPU mailbox interrupts */
+       spin_lock(&spu->register_lock);
+       out_be64(&spu->priv1->int_mask_class2_RW,
+               in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
+       spin_unlock(&spu->register_lock);
+       return 0;
+}
+
+static irqreturn_t
+spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
+{
+       struct spu *spu;
+
+       spu = data;
+       spu->class_0_pending = 1;
+       wake_up(&spu->stop_wq);
+
+       return IRQ_HANDLED;
+}
+
+static int
+spu_irq_class_0_bottom(struct spu *spu)
+{
+       unsigned long stat;
+
+       spu->class_0_pending = 0;
+
+       stat = in_be64(&spu->priv1->int_stat_class0_RW);
+
+       if (stat & 1) /* invalid MFC DMA */
+               __spu_trap_invalid_dma(spu);
+
+       if (stat & 2) /* invalid DMA alignment */
+               __spu_trap_dma_align(spu);
+
+       if (stat & 4) /* error on SPU */
+               __spu_trap_error(spu);
+
+       out_be64(&spu->priv1->int_stat_class0_RW, stat);
+       return 0;
+}
+
+static irqreturn_t
+spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
+{
+       struct spu *spu;
+       unsigned long stat, dar;
+
+       spu = data;
+       stat  = in_be64(&spu->priv1->int_stat_class1_RW);
+       dar   = in_be64(&spu->priv1->mfc_dar_RW);
+
+       if (stat & 1) /* segment fault */
+               __spu_trap_data_seg(spu, dar);
+
+       if (stat & 2) { /* mapping fault */
+               __spu_trap_data_map(spu, dar);
+       }
+
+       if (stat & 4) /* ls compare & suspend on get */
+               ;
+
+       if (stat & 8) /* ls compare & suspend on put */
+               ;
+
+       out_be64(&spu->priv1->int_stat_class1_RW, stat);
+       return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static irqreturn_t
+spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
+{
+       struct spu *spu;
+       unsigned long stat;
+
+       spu = data;
+       stat = in_be64(&spu->priv1->int_stat_class2_RW);
+
+       pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
+               in_be64(&spu->priv1->int_mask_class2_RW));
+
+
+       if (stat & 1)  /* PPC core mailbox */
+               __spu_trap_mailbox(spu);
+
+       if (stat & 2) /* SPU stop-and-signal */
+               __spu_trap_stop(spu);
+
+       if (stat & 4) /* SPU halted */
+               __spu_trap_halt(spu);
+
+       if (stat & 8) /* DMA tag group complete */
+               __spu_trap_tag_group(spu);
+
+       if (stat & 0x10) /* SPU mailbox threshold */
+               __spu_trap_spubox(spu);
+
+       out_be64(&spu->priv1->int_stat_class2_RW, stat);
+       return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+spu_request_irqs(struct spu *spu)
+{
+       int ret;
+       int irq_base;
+
+       irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
+
+       snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
+       ret = request_irq(irq_base + spu->isrc,
+                spu_irq_class_0, 0, spu->irq_c0, spu);
+       if (ret)
+               goto out;
+       out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
+
+       snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
+       ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
+                spu_irq_class_1, 0, spu->irq_c1, spu);
+       if (ret)
+               goto out1;
+       out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
+
+       snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
+       ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
+                spu_irq_class_2, 0, spu->irq_c2, spu);
+       if (ret)
+               goto out2;
+       out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
+       goto out;
+
+out2:
+       free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
+out1:
+       free_irq(irq_base + spu->isrc, spu);
+out:
+       return ret;
+}
+
+static void
+spu_free_irqs(struct spu *spu)
+{
+       int irq_base;
+
+       irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
+
+       free_irq(irq_base + spu->isrc, spu);
+       free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
+       free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+}
+
+static LIST_HEAD(spu_list);
+static DECLARE_MUTEX(spu_mutex);
+
+static void spu_init_channels(struct spu *spu)
+{
+       static const struct {
+                unsigned channel;
+                unsigned count;
+       } zero_list[] = {
+               { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
+               { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
+       }, count_list[] = {
+               { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
+               { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
+               { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
+       };
+       struct spu_priv2 *priv2;
+       int i;
+
+       priv2 = spu->priv2;
+
+       /* initialize all channel data to zero */
+       for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
+               int count;
+
+               out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
+               for (count = 0; count < zero_list[i].count; count++)
+                       out_be64(&priv2->spu_chnldata_RW, 0);
+       }
+
+       /* initialize channel counts to meaningful values */
+       for (i = 0; i < ARRAY_SIZE(count_list); i++) {
+               out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
+               out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
+       }
+}
+
+static void spu_init_regs(struct spu *spu)
+{
+       out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
+       out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
+       out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
+}
+
+struct spu *spu_alloc(void)
+{
+       struct spu *spu;
+
+       down(&spu_mutex);
+       if (!list_empty(&spu_list)) {
+               spu = list_entry(spu_list.next, struct spu, list);
+               list_del_init(&spu->list);
+               pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
+       } else {
+               pr_debug("No SPU left\n");
+               spu = NULL;
+       }
+       up(&spu_mutex);
+
+       if (spu) {
+               spu_init_channels(spu);
+               spu_init_regs(spu);
+       }
+
+       return spu;
+}
+EXPORT_SYMBOL(spu_alloc);
+
+void spu_free(struct spu *spu)
+{
+       down(&spu_mutex);
+       spu->ibox_fasync = NULL;
+       spu->wbox_fasync = NULL;
+       list_add_tail(&spu->list, &spu_list);
+       up(&spu_mutex);
+}
+EXPORT_SYMBOL(spu_free);
+
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
+static int spu_handle_mm_fault(struct spu *spu)
+{
+       struct spu_priv1 __iomem *priv1;
+       struct mm_struct *mm = spu->mm;
+       struct vm_area_struct *vma;
+       u64 ea, dsisr, is_write;
+       int ret;
+
+       priv1 = spu->priv1;
+       ea = in_be64(&priv1->mfc_dar_RW);
+       dsisr = in_be64(&priv1->mfc_dsisr_RW);
+#if 0
+       if (!IS_VALID_EA(ea)) {
+               return -EFAULT;
+       }
+#endif /* XXX */
+       if (mm == NULL) {
+               return -EFAULT;
+       }
+       if (mm->pgd == NULL) {
+               return -EFAULT;
+       }
+
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, ea);
+       if (!vma)
+               goto bad_area;
+       if (vma->vm_start <= ea)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+#if 0
+       if (expand_stack(vma, ea))
+               goto bad_area;
+#endif /* XXX */
+good_area:
+       is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+       if (is_write) {
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
+               if (dsisr & MFC_DSISR_ACCESS_DENIED)
+                       goto bad_area;
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       goto bad_area;
+       }
+       ret = 0;
+       switch (handle_mm_fault(mm, vma, ea, is_write)) {
+       case VM_FAULT_MINOR:
+               current->min_flt++;
+               break;
+       case VM_FAULT_MAJOR:
+               current->maj_flt++;
+               break;
+       case VM_FAULT_SIGBUS:
+               ret = -EFAULT;
+               goto bad_area;
+       case VM_FAULT_OOM:
+               ret = -ENOMEM;
+               goto bad_area;
+       default:
+               BUG();
+       }
+       up_read(&mm->mmap_sem);
+       return ret;
+
+bad_area:
+       up_read(&mm->mmap_sem);
+       return -EFAULT;
+}
+
+static int spu_handle_pte_fault(struct spu *spu)
+{
+       struct spu_priv1 __iomem *priv1;
+       u64 ea, dsisr, access, error = 0UL;
+       int ret = 0;
+
+       priv1 = spu->priv1;
+       ea = in_be64(&priv1->mfc_dar_RW);
+       dsisr = in_be64(&priv1->mfc_dsisr_RW);
+       access = (_PAGE_PRESENT | _PAGE_USER);
+       if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
+               if (hash_page(ea, access, 0x300) != 0)
+                       error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+       }
+       if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
+           (dsisr & MFC_DSISR_ACCESS_DENIED)) {
+               if ((ret = spu_handle_mm_fault(spu)) != 0)
+                       error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+               else
+                       error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
+       }
+       if (!error)
+               spu_restart_dma(spu);
+
+       return ret;
+}
+
+int spu_run(struct spu *spu)
+{
+       struct spu_problem __iomem *prob;
+       struct spu_priv1 __iomem *priv1;
+       struct spu_priv2 __iomem *priv2;
+       unsigned long status;
+       int ret;
+
+       prob = spu->problem;
+       priv1 = spu->priv1;
+       priv2 = spu->priv2;
+
+       /* Let SPU run.  */
+       spu->mm = current->mm;
+       eieio();
+       out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
+
+       do {
+               ret = wait_event_interruptible(spu->stop_wq,
+                        (!((status = in_be32(&prob->spu_status_R)) & 0x1))
+                       || (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
+                       || spu->class_0_pending);
+
+               if (status & SPU_STATUS_STOPPED_BY_STOP)
+                       ret = -EAGAIN;
+               else if (status & SPU_STATUS_STOPPED_BY_HALT)
+                       ret = -EIO;
+               else if (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
+                       ret = spu_handle_pte_fault(spu);
+
+               if (spu->class_0_pending)
+                       spu_irq_class_0_bottom(spu);
+
+               if (!ret && signal_pending(current))
+                       ret = -ERESTARTSYS;
+
+       } while (!ret);
+
+       /* Ensure SPU is stopped.  */
+       out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+       eieio();
+       while (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)
+               cpu_relax();
+
+       out_be64(&priv2->slb_invalidate_all_W, 0);
+       out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
+       eieio();
+
+       spu->mm = NULL;
+
+       /* Check for SPU breakpoint.  */
+       if (unlikely(current->ptrace & PT_PTRACED)) {
+               status = in_be32(&prob->spu_status_R);
+
+               if ((status & SPU_STATUS_STOPPED_BY_STOP)
+                   && status >> SPU_STOP_STATUS_SHIFT == 0x3fff) {
+                       force_sig(SIGTRAP, current);
+                       ret = -ERESTARTSYS;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(spu_run);
+
+static void __iomem * __init map_spe_prop(struct device_node *n,
+                                                const char *name)
+{
+       struct address_prop {
+               unsigned long address;
+               unsigned int len;
+       } __attribute__((packed)) *prop;
+
+       void *p;
+       int proplen;
+
+       p = get_property(n, name, &proplen);
+       if (proplen != sizeof (struct address_prop))
+               return NULL;
+
+       prop = p;
+
+       return ioremap(prop->address, prop->len);
+}
+
+static void spu_unmap(struct spu *spu)
+{
+       iounmap(spu->priv2);
+       iounmap(spu->priv1);
+       iounmap(spu->problem);
+       iounmap((u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *spe)
+{
+       char *prop;
+       int ret;
+
+       ret = -ENODEV;
+       prop = get_property(spe, "isrc", NULL);
+       if (!prop)
+               goto out;
+       spu->isrc = *(unsigned int *)prop;
+
+       spu->name = get_property(spe, "name", NULL);
+       if (!spu->name)
+               goto out;
+
+       prop = get_property(spe, "local-store", NULL);
+       if (!prop)
+               goto out;
+       spu->local_store_phys = *(unsigned long *)prop;
+
+       /* we use local store as ram, not io memory */
+       spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
+       if (!spu->local_store)
+               goto out;
+
+       spu->problem= map_spe_prop(spe, "problem");
+       if (!spu->problem)
+               goto out_unmap;
+
+       spu->priv1= map_spe_prop(spe, "priv1");
+       if (!spu->priv1)
+               goto out_unmap;
+
+       spu->priv2= map_spe_prop(spe, "priv2");
+       if (!spu->priv2)
+               goto out_unmap;
+       ret = 0;
+       goto out;
+
+out_unmap:
+       spu_unmap(spu);
+out:
+       return ret;
+}
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+       unsigned int *id;
+       struct device_node *cpu;
+
+       cpu = spe->parent->parent;
+       id = (unsigned int *)get_property(cpu, "node-id", NULL);
+
+       return id ? *id : 0;
+}
+
+static int __init create_spu(struct device_node *spe)
+{
+       struct spu *spu;
+       int ret;
+       static int number;
+
+       ret = -ENOMEM;
+       spu = kmalloc(sizeof (*spu), GFP_KERNEL);
+       if (!spu)
+               goto out;
+
+       ret = spu_map_device(spu, spe);
+       if (ret)
+               goto out_free;
+
+       spu->node = find_spu_node_id(spe);
+       spu->stop_code = 0;
+       spu->slb_replace = 0;
+       spu->mm = NULL;
+       spu->class_0_pending = 0;
+       spin_lock_init(&spu->register_lock);
+
+       out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
+       out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
+
+       init_waitqueue_head(&spu->stop_wq);
+       init_waitqueue_head(&spu->wbox_wq);
+       init_waitqueue_head(&spu->ibox_wq);
+
+       spu->ibox_fasync = NULL;
+       spu->wbox_fasync = NULL;
+
+       down(&spu_mutex);
+       spu->number = number++;
+       ret = spu_request_irqs(spu);
+       if (ret)
+               goto out_unmap;
+
+       list_add(&spu->list, &spu_list);
+       up(&spu_mutex);
+
+       pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
+               spu->name, spu->isrc, spu->local_store,
+               spu->problem, spu->priv1, spu->priv2, spu->number);
+       goto out;
+
+out_unmap:
+       up(&spu_mutex);
+       spu_unmap(spu);
+out_free:
+       kfree(spu);
+out:
+       return ret;
+}
+
+static void destroy_spu(struct spu *spu)
+{
+       list_del_init(&spu->list);
+
+       spu_free_irqs(spu);
+       spu_unmap(spu);
+       kfree(spu);
+}
+
+static void cleanup_spu_base(void)
+{
+       struct spu *spu, *tmp;
+       down(&spu_mutex);
+       list_for_each_entry_safe(spu, tmp, &spu_list, list)
+               destroy_spu(spu);
+       up(&spu_mutex);
+}
+module_exit(cleanup_spu_base);
+
+static int __init init_spu_base(void)
+{
+       struct device_node *node;
+       int ret;
+
+       ret = -ENODEV;
+       for (node = of_find_node_by_type(NULL, "spe");
+                       node; node = of_find_node_by_type(node, "spe")) {
+               ret = create_spu(node);
+               if (ret) {
+                       printk(KERN_WARNING "%s: Error initializing %s\n",
+                               __FUNCTION__, node->name);
+                       cleanup_spu_base();
+                       break;
+               }
+       }
+       /* in some old firmware versions, the spe is called 'spc', so we
+          look for that as well */
+       for (node = of_find_node_by_type(NULL, "spc");
+                       node; node = of_find_node_by_type(node, "spc")) {
+               ret = create_spu(node);
+               if (ret) {
+                       printk(KERN_WARNING "%s: Error initializing %s\n",
+                               __FUNCTION__, node->name);
+                       cleanup_spu_base();
+                       break;
+               }
+       }
+       return ret;
+}
+module_init(init_spu_base);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
new file mode 100644 (file)
index 0000000..43e0b18
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * SPU file system -- system call stubs
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/spu.h>
+
+struct spufs_calls spufs_calls = {
+       .owner = NULL,
+};
+
+/* These stub syscalls are needed to have the actual implementation
+ * within a loadable module. When spufs is built into the kernel,
+ * this file is not used and the syscalls directly enter the fs code */
+
+asmlinkage long sys_spu_create(const char __user *name,
+               unsigned int flags, mode_t mode)
+{
+       long ret;
+
+       ret = -ENOSYS;
+       if (try_module_get(spufs_calls.owner)) {
+               ret = spufs_calls.create_thread(name, flags, mode);
+               module_put(spufs_calls.owner);
+       }
+       return ret;
+}
+
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
+{
+       long ret;
+       struct file *filp;
+       int fput_needed;
+
+       ret = -ENOSYS;
+       if (try_module_get(spufs_calls.owner)) {
+               ret = -EBADF;
+               filp = fget_light(fd, &fput_needed);
+               if (filp) {
+                       ret = spufs_calls.spu_run(filp, unpc, ustatus);
+                       fput_light(filp, fput_needed);
+               }
+               module_put(spufs_calls.owner);
+       }
+       return ret;
+}
+
+int register_spu_syscalls(struct spufs_calls *calls)
+{
+       if (spufs_calls.owner)
+               return -EBUSY;
+
+       spufs_calls.create_thread = calls->create_thread;
+       spufs_calls.spu_run = calls->spu_run;
+       smp_mb();
+       spufs_calls.owner = calls->owner;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(register_spu_syscalls);
+
+void unregister_spu_syscalls(struct spufs_calls *calls)
+{
+       BUG_ON(spufs_calls.owner != calls->owner);
+       spufs_calls.owner = NULL;
+}
+EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
new file mode 100644 (file)
index 0000000..6f496e3
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SPU_FS) += spufs.o
+
+spufs-y += inode.o file.o context.o syscalls.o
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
new file mode 100644 (file)
index 0000000..a69b85e
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * SPU file system -- SPU context management
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/slab.h>
+#include <asm/spu.h>
+#include "spufs.h"
+
+struct spu_context *alloc_spu_context(void)
+{
+       struct spu_context *ctx;
+       ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
+       if (!ctx)
+               goto out;
+       ctx->spu = spu_alloc();
+       if (!ctx->spu)
+               goto out_free;
+       init_rwsem(&ctx->backing_sema);
+       spin_lock_init(&ctx->mmio_lock);
+       kref_init(&ctx->kref);
+       goto out;
+out_free:
+       kfree(ctx);
+       ctx = NULL;
+out:
+       return ctx;
+}
+
+void destroy_spu_context(struct kref *kref)
+{
+       struct spu_context *ctx;
+       ctx = container_of(kref, struct spu_context, kref);
+       if (ctx->spu)
+               spu_free(ctx->spu);
+       kfree(ctx);
+}
+
+struct spu_context * get_spu_context(struct spu_context *ctx)
+{
+       kref_get(&ctx->kref);
+       return ctx;
+}
+
+int put_spu_context(struct spu_context *ctx)
+{
+       return kref_put(&ctx->kref, &destroy_spu_context);
+}
+
+
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
new file mode 100644 (file)
index 0000000..c1e6433
--- /dev/null
@@ -0,0 +1,596 @@
+/*
+ * SPU file system -- file contents
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+static int
+spufs_mem_open(struct inode *inode, struct file *file)
+{
+       struct spufs_inode_info *i = SPUFS_I(inode);
+       file->private_data = i->i_ctx;
+       return 0;
+}
+
+static ssize_t
+spufs_mem_read(struct file *file, char __user *buffer,
+                               size_t size, loff_t *pos)
+{
+       struct spu *spu;
+       struct spu_context *ctx;
+       int ret;
+
+       ctx = file->private_data;
+       spu = ctx->spu;
+
+       down_read(&ctx->backing_sema);
+       if (spu->number & 0/*1*/) {
+               ret = generic_file_read(file, buffer, size, pos);
+               goto out;
+       }
+
+       ret = simple_read_from_buffer(buffer, size, pos,
+                                       spu->local_store, LS_SIZE);
+out:
+       up_read(&ctx->backing_sema);
+       return ret;
+}
+
+static ssize_t
+spufs_mem_write(struct file *file, const char __user *buffer,
+                                       size_t size, loff_t *pos)
+{
+       struct spu_context *ctx = file->private_data;
+       struct spu *spu = ctx->spu;
+
+       if (spu->number & 0) //1)
+               return generic_file_write(file, buffer, size, pos);
+
+       size = min_t(ssize_t, LS_SIZE - *pos, size);
+       if (size <= 0)
+               return -EFBIG;
+       *pos += size;
+       return copy_from_user(spu->local_store + *pos - size,
+                               buffer, size) ? -EFAULT : size;
+}
+
+static int
+spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct spu_context *ctx = file->private_data;
+       struct spu *spu = ctx->spu;
+       unsigned long pfn;
+
+       if (spu->number & 0) //1)
+               return generic_file_mmap(file, vma);
+
+       vma->vm_flags |= VM_RESERVED;
+       vma->vm_page_prot = __pgprot(pgprot_val (vma->vm_page_prot)
+                                                       | _PAGE_NO_CACHE);
+       pfn = spu->local_store_phys >> PAGE_SHIFT;
+       /*
+        * This will work for actual SPUs, but not for vmalloc memory:
+        */
+       if (remap_pfn_range(vma, vma->vm_start, pfn,
+                               vma->vm_end-vma->vm_start, vma->vm_page_prot))
+               return -EAGAIN;
+       return 0;
+}
+
+static struct file_operations spufs_mem_fops = {
+       .open    = spufs_mem_open,
+       .read    = spufs_mem_read,
+       .write   = spufs_mem_write,
+       .mmap    = spufs_mem_mmap,
+       .llseek  = generic_file_llseek,
+};
+
+/* generic open function for all pipe-like files */
+static int spufs_pipe_open(struct inode *inode, struct file *file)
+{
+       struct spufs_inode_info *i = SPUFS_I(inode);
+       file->private_data = i->i_ctx;
+
+       return nonseekable_open(inode, file);
+}
+
+static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       struct spu_problem __iomem *prob;
+       u32 mbox_stat;
+       u32 mbox_data;
+
+       if (len < 4)
+               return -EINVAL;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+       mbox_stat = in_be32(&prob->mb_stat_R);
+       if (!(mbox_stat & 0x0000ff))
+               return -EAGAIN;
+
+       mbox_data = in_be32(&prob->pu_mb_R);
+
+       if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
+               return -EFAULT;
+
+       return 4;
+}
+
+static struct file_operations spufs_mbox_fops = {
+       .open   = spufs_pipe_open,
+       .read   = spufs_mbox_read,
+};
+
+static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       u32 mbox_stat;
+
+       if (len < 4)
+               return -EINVAL;
+
+       ctx = file->private_data;
+       mbox_stat = in_be32(&ctx->spu->problem->mb_stat_R) & 0xff;
+
+       if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
+               return -EFAULT;
+
+       return 4;
+}
+
+static struct file_operations spufs_mbox_stat_fops = {
+       .open   = spufs_pipe_open,
+       .read   = spufs_mbox_stat_read,
+};
+
+/* low-level ibox access function */
+size_t spu_ibox_read(struct spu *spu, u32 *data)
+{
+       int ret;
+
+       spin_lock_irq(&spu->register_lock);
+
+       if (in_be32(&spu->problem->mb_stat_R) & 0xff0000) {
+               /* read the first available word */
+               *data = in_be64(&spu->priv2->puint_mb_R);
+               ret = 4;
+       } else {
+               /* make sure we get woken up by the interrupt */
+               out_be64(&spu->priv1->int_mask_class2_RW,
+                       in_be64(&spu->priv1->int_mask_class2_RW) | 0x1);
+               ret = 0;
+       }
+
+       spin_unlock_irq(&spu->register_lock);
+       return ret;
+}
+EXPORT_SYMBOL(spu_ibox_read);
+
+static int spufs_ibox_fasync(int fd, struct file *file, int on)
+{
+       struct spu_context *ctx;
+       ctx = file->private_data;
+       return fasync_helper(fd, file, on, &ctx->spu->ibox_fasync);
+}
+
+static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       u32 ibox_data;
+       ssize_t ret;
+
+       if (len < 4)
+               return -EINVAL;
+
+       ctx = file->private_data;
+
+       ret = 0;
+       if (file->f_flags & O_NONBLOCK) {
+               if (!spu_ibox_read(ctx->spu, &ibox_data))
+                       ret = -EAGAIN;
+       } else {
+               ret = wait_event_interruptible(ctx->spu->ibox_wq,
+                                spu_ibox_read(ctx->spu, &ibox_data));
+       }
+
+       if (ret)
+               return ret;
+
+       ret = 4;
+       if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
+               ret = -EFAULT;
+
+       return ret;
+}
+
+static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
+{
+       struct spu_context *ctx;
+       struct spu_problem __iomem *prob;
+       u32 mbox_stat;
+       unsigned int mask;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+       mbox_stat = in_be32(&prob->mb_stat_R);
+
+       poll_wait(file, &ctx->spu->ibox_wq, wait);
+
+       mask = 0;
+       if (mbox_stat & 0xff0000)
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+
+static struct file_operations spufs_ibox_fops = {
+       .open   = spufs_pipe_open,
+       .read   = spufs_ibox_read,
+       .poll   = spufs_ibox_poll,
+       .fasync = spufs_ibox_fasync,
+};
+
+static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       u32 ibox_stat;
+
+       if (len < 4)
+               return -EINVAL;
+
+       ctx = file->private_data;
+       ibox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 16) & 0xff;
+
+       if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
+               return -EFAULT;
+
+       return 4;
+}
+
+static struct file_operations spufs_ibox_stat_fops = {
+       .open   = spufs_pipe_open,
+       .read   = spufs_ibox_stat_read,
+};
+
+/* low-level mailbox write */
+size_t spu_wbox_write(struct spu *spu, u32 data)
+{
+       int ret;
+
+       spin_lock_irq(&spu->register_lock);
+
+       if (in_be32(&spu->problem->mb_stat_R) & 0x00ff00) {
+               /* we have space to write wbox_data to */
+               out_be32(&spu->problem->spu_mb_W, data);
+               ret = 4;
+       } else {
+               /* make sure we get woken up by the interrupt when space
+                  becomes available */
+               out_be64(&spu->priv1->int_mask_class2_RW,
+                       in_be64(&spu->priv1->int_mask_class2_RW) | 0x10);
+               ret = 0;
+       }
+
+       spin_unlock_irq(&spu->register_lock);
+       return ret;
+}
+EXPORT_SYMBOL(spu_wbox_write);
+
+static int spufs_wbox_fasync(int fd, struct file *file, int on)
+{
+       struct spu_context *ctx;
+       ctx = file->private_data;
+       return fasync_helper(fd, file, on, &ctx->spu->wbox_fasync);
+}
+
+static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       u32 wbox_data;
+       int ret;
+
+       if (len < 4)
+               return -EINVAL;
+
+       ctx = file->private_data;
+
+       if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+               return -EFAULT;
+
+       ret = 0;
+       if (file->f_flags & O_NONBLOCK) {
+               if (!spu_wbox_write(ctx->spu, wbox_data))
+                       ret = -EAGAIN;
+       } else {
+               ret = wait_event_interruptible(ctx->spu->wbox_wq,
+                       spu_wbox_write(ctx->spu, wbox_data));
+       }
+
+       return ret ? ret : sizeof wbox_data;
+}
+
+static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
+{
+       struct spu_context *ctx;
+       struct spu_problem __iomem *prob;
+       u32 mbox_stat;
+       unsigned int mask;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+       mbox_stat = in_be32(&prob->mb_stat_R);
+
+       poll_wait(file, &ctx->spu->wbox_wq, wait);
+
+       mask = 0;
+       if (mbox_stat & 0x00ff00)
+               mask = POLLOUT | POLLWRNORM;
+
+       return mask;
+}
+
+static struct file_operations spufs_wbox_fops = {
+       .open   = spufs_pipe_open,
+       .write  = spufs_wbox_write,
+       .poll   = spufs_wbox_poll,
+       .fasync = spufs_wbox_fasync,
+};
+
+static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       u32 wbox_stat;
+
+       if (len < 4)
+               return -EINVAL;
+
+       ctx = file->private_data;
+       wbox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 8) & 0xff;
+
+       if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
+               return -EFAULT;
+
+       return 4;
+}
+
+static struct file_operations spufs_wbox_stat_fops = {
+       .open   = spufs_pipe_open,
+       .read   = spufs_wbox_stat_read,
+};
+
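+/*
+ * Start the SPU at *npc and wait for it to stop. The backing semaphore
+ * is taken for writing so that the local store file operations, which
+ * take it for reading, are excluded while the SPU is running.
+ */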
+long spufs_run_spu(struct file *file, struct spu_context *ctx,
+               u32 *npc, u32 *status)
+{
+       struct spu_problem __iomem *prob;
+       int ret;
+
+       if (file->f_flags & O_NONBLOCK) {
+               ret = -EAGAIN;
+               if (!down_write_trylock(&ctx->backing_sema))
+                       goto out;
+       } else {
+               down_write(&ctx->backing_sema);
+       }
+
+       prob = ctx->spu->problem;
+       out_be32(&prob->spu_npc_RW, *npc);
+
+       ret = spu_run(ctx->spu);
+
+       *status = in_be32(&prob->spu_status_R);
+       *npc = in_be32(&prob->spu_npc_RW);
+
+       up_write(&ctx->backing_sema);
+
+out:
+       return ret;
+}
+
+static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       struct spu_problem *prob;
+       u32 data;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+
+       if (len < 4)
+               return -EINVAL;
+
+       data = in_be32(&prob->signal_notify1);
+       if (copy_to_user(buf, &data, 4))
+               return -EFAULT;
+
+       return 4;
+}
+
+static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       struct spu_problem *prob;
+       u32 data;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+
+       if (len < 4)
+               return -EINVAL;
+
+       if (copy_from_user(&data, buf, 4))
+               return -EFAULT;
+
+       out_be32(&prob->signal_notify1, data);
+
+       return 4;
+}
+
+static struct file_operations spufs_signal1_fops = {
+       .open = spufs_pipe_open,
+       .read = spufs_signal1_read,
+       .write = spufs_signal1_write,
+};
+
+static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       struct spu_problem *prob;
+       u32 data;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+
+       if (len < 4)
+               return -EINVAL;
+
+       data = in_be32(&prob->signal_notify2);
+       if (copy_to_user(buf, &data, 4))
+               return -EFAULT;
+
+       return 4;
+}
+
+static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx;
+       struct spu_problem *prob;
+       u32 data;
+
+       ctx = file->private_data;
+       prob = ctx->spu->problem;
+
+       if (len < 4)
+               return -EINVAL;
+
+       if (copy_from_user(&data, buf, 4))
+               return -EFAULT;
+
+       out_be32(&prob->signal_notify2, data);
+
+       return 4;
+}
+
+static struct file_operations spufs_signal2_fops = {
+       .open = spufs_pipe_open,
+       .read = spufs_signal2_read,
+       .write = spufs_signal2_write,
+};
+
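+/*
+ * The signal<n>_type attributes toggle bits 0 and 1 of spu_cfg_RW,
+ * which select the mode of the corresponding signal notification
+ * register (0 is overwrite mode, 1 is logical-OR mode per the CBEA).
+ */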
+static void spufs_signal1_type_set(void *data, u64 val)
+{
+       struct spu_context *ctx = data;
+       struct spu_priv2 *priv2 = ctx->spu->priv2;
+       u64 tmp;
+
+       spin_lock_irq(&ctx->spu->register_lock);
+       tmp = in_be64(&priv2->spu_cfg_RW);
+       if (val)
+               tmp |= 1;
+       else
+               tmp &= ~1;
+       out_be64(&priv2->spu_cfg_RW, tmp);
+       spin_unlock_irq(&ctx->spu->register_lock);
+}
+
+static u64 spufs_signal1_type_get(void *data)
+{
+       struct spu_context *ctx = data;
+       return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
+                                       spufs_signal1_type_set, "%llu");
+
+static void spufs_signal2_type_set(void *data, u64 val)
+{
+       struct spu_context *ctx = data;
+       struct spu_priv2 *priv2 = ctx->spu->priv2;
+       u64 tmp;
+
+       spin_lock_irq(&ctx->spu->register_lock);
+       tmp = in_be64(&priv2->spu_cfg_RW);
+       if (val)
+               tmp |= 2;
+       else
+               tmp &= ~2;
+       out_be64(&priv2->spu_cfg_RW, tmp);
+       spin_unlock_irq(&ctx->spu->register_lock);
+}
+
+static u64 spufs_signal2_type_get(void *data)
+{
+       struct spu_context *ctx = data;
+       return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
+                                       spufs_signal2_type_set, "%llu");
+
+static void spufs_npc_set(void *data, u64 val)
+{
+       struct spu_context *ctx = data;
+       out_be32(&ctx->spu->problem->spu_npc_RW, val);
+}
+
+static u64 spufs_npc_get(void *data)
+{
+       struct spu_context *ctx = data;
+       u64 ret;
+       ret = in_be32(&ctx->spu->problem->spu_npc_RW);
+       return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n");
+
+struct tree_descr spufs_dir_contents[] = {
+       { "mem",  &spufs_mem_fops,  0666, },
+       { "mbox", &spufs_mbox_fops, 0444, },
+       { "ibox", &spufs_ibox_fops, 0444, },
+       { "wbox", &spufs_wbox_fops, 0222, },
+       { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+       { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+       { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+       { "signal1", &spufs_signal1_fops, 0666, },
+       { "signal2", &spufs_signal2_fops, 0666, },
+       { "signal1_type", &spufs_signal1_type, 0666, },
+       { "signal2_type", &spufs_signal2_type, 0666, },
+       { "npc", &spufs_npc_ops, 0666, },
+       {},
+};
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
new file mode 100644 (file)
index 0000000..f7aa0a6
--- /dev/null
@@ -0,0 +1,470 @@
+/*
+ * SPU file system
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/backing-dev.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+static kmem_cache_t *spufs_inode_cache;
+
+/* Information about the backing dev, same as ramfs */
+#if 0
+static struct backing_dev_info spufs_backing_dev_info = {
+       .ra_pages       = 0,    /* No readahead */
+       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
+         BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | BDI_CAP_READ_MAP |
+         BDI_CAP_WRITE_MAP,
+};
+
+static struct address_space_operations spufs_aops = {
+       .readpage       = simple_readpage,
+       .prepare_write  = simple_prepare_write,
+       .commit_write   = simple_commit_write,
+};
+#endif
+
+/* Inode operations */
+
+static struct inode *
+spufs_alloc_inode(struct super_block *sb)
+{
+       struct spufs_inode_info *ei;
+
+       ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+       if (!ei)
+               return NULL;
+       return &ei->vfs_inode;
+}
+
+static void
+spufs_destroy_inode(struct inode *inode)
+{
+       kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
+}
+
+static void
+spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+{
+       struct spufs_inode_info *ei = p;
+
+       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+           SLAB_CTOR_CONSTRUCTOR) {
+               inode_init_once(&ei->vfs_inode);
+       }
+}
+
+static struct inode *
+spufs_new_inode(struct super_block *sb, int mode)
+{
+       struct inode *inode;
+
+       inode = new_inode(sb);
+       if (!inode)
+               goto out;
+
+       inode->i_mode = mode;
+       inode->i_uid = current->fsuid;
+       inode->i_gid = current->fsgid;
+       inode->i_blksize = PAGE_CACHE_SIZE;
+       inode->i_blocks = 0;
+       inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+out:
+       return inode;
+}
+
+static int
+spufs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+       struct inode *inode = dentry->d_inode;
+
+/*     dump_stack();
+       pr_debug("ia_size %lld, i_size:%lld\n", attr->ia_size, inode->i_size);
+*/
+       if ((attr->ia_valid & ATTR_SIZE) &&
+           (attr->ia_size != inode->i_size))
+               return -EINVAL;
+       return inode_setattr(inode, attr);
+}
+
+
+static int
+spufs_new_file(struct super_block *sb, struct dentry *dentry,
+               struct file_operations *fops, int mode,
+               struct spu_context *ctx)
+{
+       static struct inode_operations spufs_file_iops = {
+               .getattr = simple_getattr,
+               .setattr = spufs_setattr,
+               .unlink  = simple_unlink,
+       };
+       struct inode *inode;
+       int ret;
+
+       ret = -ENOSPC;
+       inode = spufs_new_inode(sb, S_IFREG | mode);
+       if (!inode)
+               goto out;
+
+       ret = 0;
+       inode->i_op = &spufs_file_iops;
+       inode->i_fop = fops;
+       inode->u.generic_ip = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
+       d_add(dentry, inode);
+out:
+       return ret;
+}
+
+static void
+spufs_delete_inode(struct inode *inode)
+{
+       if (SPUFS_I(inode)->i_ctx)
+               put_spu_context(SPUFS_I(inode)->i_ctx);
+       clear_inode(inode);
+}
+
+static int
+spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
+               int mode, struct spu_context *ctx)
+{
+       struct dentry *dentry;
+       int ret;
+
+       while (files->name && files->name[0]) {
+               ret = -ENOMEM;
+               dentry = d_alloc_name(dir, files->name);
+               if (!dentry)
+                       goto out;
+               ret = spufs_new_file(dir->d_sb, dentry, files->ops,
+                                       files->mode & mode, ctx);
+               if (ret)
+                       goto out;
+               files++;
+       }
+       return 0;
+out:
+       /* FIXME: remove all files that are left */
+
+       return ret;
+}
+
+static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
+{
+       struct dentry *dentry;
+       int err;
+
+       spin_lock(&dcache_lock);
+       /* remove all entries */
+       err = 0;
+       list_for_each_entry(dentry, &dir_dentry->d_subdirs, d_child) {
+               if (d_unhashed(dentry) || !dentry->d_inode)
+                       continue;
+               atomic_dec(&dentry->d_count);
+               spin_lock(&dentry->d_lock);
+               __d_drop(dentry);
+               spin_unlock(&dentry->d_lock);
+       }
+       spin_unlock(&dcache_lock);
+       if (!err) {
+               shrink_dcache_parent(dir_dentry);
+               err = simple_rmdir(root, dir_dentry);
+       }
+       return err;
+}
+
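+/*
+ * Closing the directory file returned by spu_create tears the context
+ * down: all entries are dropped from the dcache and the directory is
+ * removed, releasing the last reference to the spu_context.
+ */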
+static int spufs_dir_close(struct inode *inode, struct file *file)
+{
+       struct inode *dir;
+       struct dentry *dentry;
+       int ret;
+
+       dentry = file->f_dentry;
+       dir = dentry->d_parent->d_inode;
+       down(&dir->i_sem);
+       ret = spufs_rmdir(dir, file->f_dentry);
+       WARN_ON(ret);
+       up(&dir->i_sem);
+       return dcache_dir_close(inode, file);
+}
+
+struct inode_operations spufs_dir_inode_operations = {
+       .lookup = simple_lookup,
+};
+
+struct file_operations spufs_autodelete_dir_operations = {
+       .open           = dcache_dir_open,
+       .release        = spufs_dir_close,
+       .llseek         = dcache_dir_lseek,
+       .read           = generic_read_dir,
+       .readdir        = dcache_readdir,
+       .fsync          = simple_sync_file,
+};
+
+static int
+spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+       int ret;
+       struct inode *inode;
+       struct spu_context *ctx;
+
+       ret = -ENOSPC;
+       inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
+       if (!inode)
+               goto out;
+
+       if (dir->i_mode & S_ISGID) {
+               inode->i_gid = dir->i_gid;
+               inode->i_mode |= S_ISGID;
+       }
+       ctx = alloc_spu_context();
+       SPUFS_I(inode)->i_ctx = ctx;
+       if (!ctx)
+               goto out_iput;
+
+       inode->i_op = &spufs_dir_inode_operations;
+       inode->i_fop = &simple_dir_operations;
+       ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+       if (ret)
+               goto out_free_ctx;
+
+       d_instantiate(dentry, inode);
+       dget(dentry);
+       dir->i_nlink++;
+       goto out;
+
+out_free_ctx:
+       put_spu_context(ctx);
+out_iput:
+       iput(inode);
+out:
+       return ret;
+}
+
+long
+spufs_create_thread(struct nameidata *nd, const char *name,
+                       unsigned int flags, mode_t mode)
+{
+       struct dentry *dentry;
+       struct file *filp;
+       int ret;
+
+       /* need to be at the root of spufs */
+       ret = -EINVAL;
+       if (nd->dentry->d_sb->s_magic != SPUFS_MAGIC ||
+               nd->dentry != nd->dentry->d_sb->s_root)
+               goto out;
+
+       dentry = lookup_create(nd, 1);
+       ret = PTR_ERR(dentry);
+       if (IS_ERR(dentry))
+               goto out_dir;
+
+       ret = -EEXIST;
+       if (dentry->d_inode)
+               goto out_dput;
+
+       mode &= ~current->fs->umask;
+       ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
+       if (ret)
+               goto out_dput;
+
+       ret = get_unused_fd();
+       if (ret < 0)
+               goto out_dput;
+
+       dentry->d_inode->i_nlink++;
+
+       filp = filp_open(name, O_RDONLY, mode);
+       if (IS_ERR(filp)) {
+               /* FIXME: remove directory again */
+               put_unused_fd(ret);
+               ret = PTR_ERR(filp);
+       } else {
+               filp->f_op = &spufs_autodelete_dir_operations;
+               fd_install(ret, filp);
+       }
+
+out_dput:
+       dput(dentry);
+out_dir:
+       up(&nd->dentry->d_inode->i_sem);
+out:
+       return ret;
+}
+
+/* File system initialization */
+enum {
+       Opt_uid, Opt_gid, Opt_err,
+};
+
+static match_table_t spufs_tokens = {
+       { Opt_uid, "uid=%d" },
+       { Opt_gid, "gid=%d" },
+       { Opt_err, NULL  },
+};
+
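+/* returns 1 if all options were parsed, 0 on the first unknown or bad option */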
+static int
+spufs_parse_options(char *options, struct inode *root)
+{
+       char *p;
+       substring_t args[MAX_OPT_ARGS];
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token, option;
+
+               if (!*p)
+                       continue;
+
+               token = match_token(p, spufs_tokens, args);
+               switch (token) {
+               case Opt_uid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       root->i_uid = option;
+                       break;
+               case Opt_gid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       root->i_gid = option;
+                       break;
+               default:
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+static int
+spufs_create_root(struct super_block *sb, void *data)
+{
+       struct inode *inode;
+       int ret;
+
+       ret = -ENOMEM;
+       inode = spufs_new_inode(sb, S_IFDIR | 0775);
+       if (!inode)
+               goto out;
+
+       inode->i_op = &spufs_dir_inode_operations;
+       inode->i_fop = &simple_dir_operations;
+       SPUFS_I(inode)->i_ctx = NULL;
+
+       ret = -EINVAL;
+       if (!spufs_parse_options(data, inode))
+               goto out_iput;
+
+       ret = -ENOMEM;
+       sb->s_root = d_alloc_root(inode);
+       if (!sb->s_root)
+               goto out_iput;
+
+       return 0;
+out_iput:
+       iput(inode);
+out:
+       return ret;
+}
+
+static int
+spufs_fill_super(struct super_block *sb, void *data, int silent)
+{
+       static struct super_operations s_ops = {
+               .alloc_inode = spufs_alloc_inode,
+               .destroy_inode = spufs_destroy_inode,
+               .statfs = simple_statfs,
+               .delete_inode = spufs_delete_inode,
+               .drop_inode = generic_delete_inode,
+       };
+
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+       sb->s_blocksize = PAGE_CACHE_SIZE;
+       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_magic = SPUFS_MAGIC;
+       sb->s_op = &s_ops;
+
+       return spufs_create_root(sb, data);
+}
+
+static struct super_block *
+spufs_get_sb(struct file_system_type *fstype, int flags,
+               const char *name, void *data)
+{
+       return get_sb_single(fstype, flags, data, spufs_fill_super);
+}
+
+static struct file_system_type spufs_type = {
+       .owner = THIS_MODULE,
+       .name = "spufs",
+       .get_sb = spufs_get_sb,
+       .kill_sb = kill_litter_super,
+};
+
+static int spufs_init(void)
+{
+       int ret;
+       ret = -ENOMEM;
+       spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
+                       sizeof(struct spufs_inode_info), 0,
+                       SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
+
+       if (!spufs_inode_cache)
+               goto out;
+       ret = register_filesystem(&spufs_type);
+       if (ret)
+               goto out_cache;
+       ret = register_spu_syscalls(&spufs_calls);
+       if (ret)
+               goto out_fs;
+       return 0;
+out_fs:
+       unregister_filesystem(&spufs_type);
+out_cache:
+       kmem_cache_destroy(spufs_inode_cache);
+out:
+       return ret;
+}
+module_init(spufs_init);
+
+static void spufs_exit(void)
+{
+       unregister_spu_syscalls(&spufs_calls);
+       unregister_filesystem(&spufs_type);
+       kmem_cache_destroy(spufs_inode_cache);
+}
+module_exit(spufs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
new file mode 100644 (file)
index 0000000..b37fe79
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * SPU file system
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef SPUFS_H
+#define SPUFS_H
+
+#include <linux/kref.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+
+#include <asm/spu.h>
+
+/* The magic number for our file system */
+enum {
+       SPUFS_MAGIC = 0x23c9b64e,
+};
+
+struct spu_context {
+       struct spu *spu;                  /* pointer to a physical SPU */
+       struct rw_semaphore backing_sema; /* protects the above */
+       spinlock_t mmio_lock;             /* protects mmio access */
+
+       struct kref kref;
+};
+
+struct spufs_inode_info {
+       struct spu_context *i_ctx;
+       struct inode vfs_inode;
+};
+#define SPUFS_I(inode) \
+       container_of(inode, struct spufs_inode_info, vfs_inode)
+
+extern struct tree_descr spufs_dir_contents[];
+
+/* system call implementation */
+long spufs_run_spu(struct file *file,
+                  struct spu_context *ctx, u32 *npc, u32 *status);
+long spufs_create_thread(struct nameidata *nd, const char *name,
+                        unsigned int flags, mode_t mode);
+
+/* context management */
+struct spu_context * alloc_spu_context(void);
+void destroy_spu_context(struct kref *kref);
+struct spu_context * get_spu_context(struct spu_context *ctx);
+int put_spu_context(struct spu_context *ctx);
+
+void spu_acquire(struct spu_context *ctx);
+void spu_release(struct spu_context *ctx);
+void spu_acquire_runnable(struct spu_context *ctx);
+void spu_acquire_saved(struct spu_context *ctx);
+
+#endif
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
new file mode 100644 (file)
index 0000000..3f71bb5
--- /dev/null
@@ -0,0 +1,106 @@
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+/**
+ * sys_spu_run - run code loaded into an SPU
+ *
+ * @unpc:    next program counter for the SPU
+ * @ustatus: status of the SPU
+ *
+ * This system call transfers control of execution from a
+ * user space thread to an SPU. It returns when the SPU has
+ * finished executing or hits an error condition, and it is
+ * interrupted if a signal needs to be delivered to a
+ * handler in user space.
+ *
+ * The next program counter is set to the passed value
+ * before the SPU starts fetching code and the user space
+ * pointer gets updated with the new value when returning
+ * from kernel space.
+ *
+ * The status value returned from spu_run reflects the
+ * value of the spu_status register after the SPU has stopped.
+ *
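+ * A minimal user space sketch (hypothetical names; ctx_fd is assumed
+ * to be the descriptor returned by spu_create(2), with the SPU
+ * program already copied into the context's "mem" file):
+ *
+ *     __u32 npc = 0, status = 0;
+ *     long rc = syscall(__NR_spu_run, ctx_fd, &npc, &status);
+ *
+ * On return, npc holds the updated program counter and status the
+ * value of the SPU status register.
+ *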
+ */
+long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
+{
+       long ret;
+       struct spufs_inode_info *i;
+       u32 npc, status;
+
+       ret = -EFAULT;
+       if (get_user(npc, unpc))
+               goto out;
+
+       ret = -EINVAL;
+       if (filp->f_vfsmnt->mnt_sb->s_magic != SPUFS_MAGIC)
+               goto out;
+
+       i = SPUFS_I(filp->f_dentry->d_inode);
+       ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
+
+       if (ret == -EAGAIN || ret == -EIO)
+               ret = status;
+
+       if (put_user(npc, unpc))
+               ret = -EFAULT;
+
+       if (ustatus && put_user(status, ustatus))
+               ret = -EFAULT;
+out:
+       return ret;
+}
+
+#ifndef MODULE
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
+{
+       int fput_needed;
+       struct file *filp;
+       long ret;
+
+       ret = -EBADF;
+       filp = fget_light(fd, &fput_needed);
+       if (filp) {
+               ret = do_spu_run(filp, unpc, ustatus);
+               fput_light(filp, fput_needed);
+       }
+
+       return ret;
+}
+#endif
+
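+/**
+ * sys_spu_create - create a new SPU context
+ * @pathname: name of the context directory in the spufs mount
+ * @flags:    currently unused
+ * @mode:     access mode for the new context directory
+ *
+ * Creates a context directory under the spufs root and returns an
+ * open file descriptor for it; closing that descriptor destroys the
+ * context again. A user space sketch, assuming spufs is mounted on
+ * the hypothetical path /spu:
+ *
+ *     int ctx_fd = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);
+ */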
+asmlinkage long sys_spu_create(const char __user *pathname,
+                                       unsigned int flags, mode_t mode)
+{
+       char *tmp;
+       int ret;
+
+       tmp = getname(pathname);
+       ret = PTR_ERR(tmp);
+       if (!IS_ERR(tmp)) {
+               struct nameidata nd;
+
+               ret = path_lookup(tmp, LOOKUP_PARENT|
+                               LOOKUP_OPEN|LOOKUP_CREATE, &nd);
+               if (!ret) {
+                       ret = spufs_create_thread(&nd, pathname, flags, mode);
+                       path_release(&nd);
+               }
+               putname(tmp);
+       }
+
+       return ret;
+}
+
+struct spufs_calls spufs_calls = {
+       .create_thread = sys_spu_create,
+       .spu_run = do_spu_run,
+       .owner = THIS_MODULE,
+};
index 28f1082e5040f4aa6ea9573a477f1cc2411199a0..95075f99a6d4ea09abfb4c388a019a040d389e1c 100644 (file)
@@ -307,7 +307,6 @@ EXPORT_SYMBOL(__res);
 
 EXPORT_SYMBOL(next_mmu_context);
 EXPORT_SYMBOL(set_context);
-EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
 EXPORT_SYMBOL(disarm_decr);
 #ifdef CONFIG_PPC_STD_MMU
 extern long mol_trampoline;
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
new file mode 100644 (file)
index 0000000..b036385
--- /dev/null
@@ -0,0 +1,498 @@
+/*
+ * SPU core / file system interface and HW structures
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_H
+#define _SPU_H
+#include <linux/config.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+
+#define LS_ORDER (6)           /* 256 kb, assuming 4k pages */
+
+#define LS_SIZE (PAGE_SIZE << LS_ORDER)
+
+struct spu {
+       char *name;
+       unsigned long local_store_phys;
+       u8 *local_store;
+       struct spu_problem __iomem *problem;
+       struct spu_priv1 __iomem *priv1;
+       struct spu_priv2 __iomem *priv2;
+       struct list_head list;
+       int number;
+       u32 isrc;
+       u32 node;
+       struct kref kref;
+       size_t ls_size;
+       unsigned int slb_replace;
+       struct mm_struct *mm;
+       int class_0_pending;
+       spinlock_t register_lock;
+
+       u32 stop_code;
+       wait_queue_head_t stop_wq;
+       wait_queue_head_t ibox_wq;
+       wait_queue_head_t wbox_wq;
+       struct fasync_struct *ibox_fasync;
+       struct fasync_struct *wbox_fasync;
+
+       char irq_c0[8];
+       char irq_c1[8];
+       char irq_c2[8];
+};
+
+struct spu *spu_alloc(void);
+void spu_free(struct spu *spu);
+int spu_run(struct spu *spu);
+
+size_t spu_wbox_write(struct spu *spu, u32 data);
+size_t spu_ibox_read(struct spu *spu, u32 *data);
+
+extern struct spufs_calls {
+       asmlinkage long (*create_thread)(const char __user *name,
+                                       unsigned int flags, mode_t mode);
+       asmlinkage long (*spu_run)(struct file *filp, __u32 __user *unpc,
+                                               __u32 __user *ustatus);
+       struct module *owner;
+} spufs_calls;
+
+#ifdef CONFIG_SPU_FS_MODULE
+int register_spu_syscalls(struct spufs_calls *calls);
+void unregister_spu_syscalls(struct spufs_calls *calls);
+#else
+static inline int register_spu_syscalls(struct spufs_calls *calls)
+{
+       return 0;
+}
+static inline void unregister_spu_syscalls(struct spufs_calls *calls)
+{
+}
+#endif /* MODULE */
+
+
+/*
+ * This defines the Local Store, Problem Area and Privilege Area of an SPU.
+ */
+
+union mfc_tag_size_class_cmd {
+       struct {
+               u16 mfc_size;
+               u16 mfc_tag;
+               u8  pad;
+               u8  mfc_rclassid;
+               u16 mfc_cmd;
+       } u;
+       struct {
+               u32 mfc_size_tag32;
+               u32 mfc_class_cmd32;
+       } by32;
+       u64 all64;
+};
+
+struct mfc_cq_sr {
+       u64 mfc_cq_data0_RW;
+       u64 mfc_cq_data1_RW;
+       u64 mfc_cq_data2_RW;
+       u64 mfc_cq_data3_RW;
+};
+
+struct spu_problem {
+#define MS_SYNC_PENDING         1L
+       u64 spc_mssync_RW;                                      /* 0x0000 */
+       u8  pad_0x0008_0x3000[0x3000 - 0x0008];
+
+       /* DMA Area */
+       u8  pad_0x3000_0x3004[0x4];                             /* 0x3000 */
+       u32 mfc_lsa_W;                                          /* 0x3004 */
+       u64 mfc_ea_W;                                           /* 0x3008 */
+       union mfc_tag_size_class_cmd mfc_union_W;                       /* 0x3010 */
+       u8  pad_0x3018_0x3104[0xec];                            /* 0x3018 */
+       u32 dma_qstatus_R;                                      /* 0x3104 */
+       u8  pad_0x3108_0x3204[0xfc];                            /* 0x3108 */
+       u32 dma_querytype_RW;                                   /* 0x3204 */
+       u8  pad_0x3208_0x321c[0x14];                            /* 0x3208 */
+       u32 dma_querymask_RW;                                   /* 0x321c */
+       u8  pad_0x3220_0x322c[0xc];                             /* 0x3220 */
+       u32 dma_tagstatus_R;                                    /* 0x322c */
+#define DMA_TAGSTATUS_INTR_ANY 1u
+#define DMA_TAGSTATUS_INTR_ALL 2u
+       u8  pad_0x3230_0x4000[0x4000 - 0x3230];                 /* 0x3230 */
+
+       /* SPU Control Area */
+       u8  pad_0x4000_0x4004[0x4];                             /* 0x4000 */
+       u32 pu_mb_R;                                            /* 0x4004 */
+       u8  pad_0x4008_0x400c[0x4];                             /* 0x4008 */
+       u32 spu_mb_W;                                           /* 0x400c */
+       u8  pad_0x4010_0x4014[0x4];                             /* 0x4010 */
+       u32 mb_stat_R;                                          /* 0x4014 */
+       u8  pad_0x4018_0x401c[0x4];                             /* 0x4018 */
+       u32 spu_runcntl_RW;                                     /* 0x401c */
+#define SPU_RUNCNTL_STOP       0L
+#define SPU_RUNCNTL_RUNNABLE   1L
+       u8  pad_0x4020_0x4024[0x4];                             /* 0x4020 */
+       u32 spu_status_R;                                       /* 0x4024 */
+#define SPU_STOP_STATUS_SHIFT           16
+#define SPU_STATUS_STOPPED             0x0
+#define SPU_STATUS_RUNNING             0x1
+#define SPU_STATUS_STOPPED_BY_STOP     0x2
+#define SPU_STATUS_STOPPED_BY_HALT     0x4
+#define SPU_STATUS_WAITING_FOR_CHANNEL 0x8
+#define SPU_STATUS_SINGLE_STEP         0x10
+#define SPU_STATUS_INVALID_INSTR        0x20
+#define SPU_STATUS_INVALID_CH           0x40
+#define SPU_STATUS_ISOLATED_STATE       0x80
+#define SPU_STATUS_ISOLATED_LOAD_STAUTUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STAUTUS 0x400
+       u8  pad_0x4028_0x402c[0x4];                             /* 0x4028 */
+       u32 spu_spe_R;                                          /* 0x402c */
+       u8  pad_0x4030_0x4034[0x4];                             /* 0x4030 */
+       u32 spu_npc_RW;                                         /* 0x4034 */
+       u8  pad_0x4038_0x14000[0x14000 - 0x4038];               /* 0x4038 */
+
+       /* Signal Notification Area */
+       u8  pad_0x14000_0x1400c[0xc];                           /* 0x14000 */
+       u32 signal_notify1;                                     /* 0x1400c */
+       u8  pad_0x14010_0x1c00c[0x7ffc];                        /* 0x14010 */
+       u32 signal_notify2;                                     /* 0x1c00c */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 2 State Area */
+struct spu_priv2 {
+       /* MFC Registers */
+       u8  pad_0x0000_0x1100[0x1100 - 0x0000];                 /* 0x0000 */
+
+       /* SLB Management Registers */
+       u8  pad_0x1100_0x1108[0x8];                             /* 0x1100 */
+       u64 slb_index_W;                                        /* 0x1108 */
+#define SLB_INDEX_MASK                         0x7L
+       u64 slb_esid_RW;                                        /* 0x1110 */
+       u64 slb_vsid_RW;                                        /* 0x1118 */
+#define SLB_VSID_SUPERVISOR_STATE      (0x1ull << 11)
+#define SLB_VSID_SUPERVISOR_STATE_MASK (0x1ull << 11)
+#define SLB_VSID_PROBLEM_STATE         (0x1ull << 10)
+#define SLB_VSID_PROBLEM_STATE_MASK    (0x1ull << 10)
+#define SLB_VSID_EXECUTE_SEGMENT       (0x1ull << 9)
+#define SLB_VSID_NO_EXECUTE_SEGMENT    (0x1ull << 9)
+#define SLB_VSID_EXECUTE_SEGMENT_MASK  (0x1ull << 9)
+#define SLB_VSID_4K_PAGE               (0x0 << 8)
+#define SLB_VSID_LARGE_PAGE            (0x1ull << 8)
+#define SLB_VSID_PAGE_SIZE_MASK                (0x1ull << 8)
+#define SLB_VSID_CLASS_MASK            (0x1ull << 7)
+#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK        (0x1ull << 6)
+       u64 slb_invalidate_entry_W;                             /* 0x1120 */
+       u64 slb_invalidate_all_W;                               /* 0x1128 */
+       u8  pad_0x1130_0x2000[0x2000 - 0x1130];                 /* 0x1130 */
+
+       /* Context Save / Restore Area */
+       struct mfc_cq_sr spuq[16];                              /* 0x2000 */
+       struct mfc_cq_sr puq[8];                                /* 0x2200 */
+       u8  pad_0x2300_0x3000[0x3000 - 0x2300];                 /* 0x2300 */
+
+       /* MFC Control */
+       u64 mfc_control_RW;                                     /* 0x3000 */
+#define MFC_CNTL_RESUME_DMA_QUEUE              (0ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE             (1ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK                (1ull << 0)
+#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION    (0ull << 8)
+#define MFC_CNTL_SUSPEND_IN_PROGRESS           (1ull << 8)
+#define MFC_CNTL_SUSPEND_COMPLETE              (3ull << 8)
+#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK       (3ull << 8)
+#define MFC_CNTL_DMA_QUEUES_EMPTY              (1ull << 14)
+#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK         (1ull << 14)
+#define MFC_CNTL_PURGE_DMA_REQUEST             (1ull << 15)
+#define MFC_CNTL_PURGE_DMA_IN_PROGRESS         (1ull << 24)
+#define MFC_CNTL_PURGE_DMA_COMPLETE            (3ull << 24)
+#define MFC_CNTL_PURGE_DMA_STATUS_MASK         (3ull << 24)
+#define MFC_CNTL_RESTART_DMA_COMMAND           (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING   (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
+#define MFC_CNTL_MFC_PRIVILEGE_STATE           (2ull << 33)
+#define MFC_CNTL_MFC_PROBLEM_STATE             (3ull << 33)
+#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK (3ull << 33)
+#define MFC_CNTL_DECREMENTER_HALTED            (1ull << 35)
+#define MFC_CNTL_DECREMENTER_RUNNING           (1ull << 40)
+#define MFC_CNTL_DECREMENTER_STATUS_MASK       (1ull << 40)
+       u8  pad_0x3008_0x4000[0x4000 - 0x3008];                 /* 0x3008 */
+
+       /* Interrupt Mailbox */
+       u64 puint_mb_R;                                         /* 0x4000 */
+       u8  pad_0x4008_0x4040[0x4040 - 0x4008];                 /* 0x4008 */
+
+       /* SPU Control */
+       u64 spu_privcntl_RW;                                    /* 0x4040 */
+#define SPU_PRIVCNTL_MODE_NORMAL               (0x0ull << 0)
+#define SPU_PRIVCNTL_MODE_SINGLE_STEP          (0x1ull << 0)
+#define SPU_PRIVCNTL_MODE_MASK                 (0x1ull << 0)
+#define SPU_PRIVCNTL_NO_ATTENTION_EVENT                (0x0ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT           (0x1ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK      (0x1ull << 1)
+#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL                (0x0ull << 2)
+#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK   (0x1ull << 2)
+       u8  pad_0x4048_0x4058[0x10];                            /* 0x4048 */
+       u64 spu_lslr_RW;                                        /* 0x4058 */
+       u64 spu_chnlcntptr_RW;                                  /* 0x4060 */
+       u64 spu_chnlcnt_RW;                                     /* 0x4068 */
+       u64 spu_chnldata_RW;                                    /* 0x4070 */
+       u64 spu_cfg_RW;                                         /* 0x4078 */
+       u8  pad_0x4080_0x5000[0x5000 - 0x4080];                 /* 0x4080 */
+
+       /* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
+       u64 spu_pm_trace_tag_status_RW;                         /* 0x5000 */
+       u64 spu_tag_status_query_RW;                            /* 0x5008 */
+#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
+#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
+       u64 spu_cmd_buf1_RW;                                    /* 0x5010 */
+#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
+#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
+       u64 spu_cmd_buf2_RW;                                    /* 0x5018 */
+#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
+#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
+#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
+       u64 spu_atomic_status_RW;                               /* 0x5020 */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 1 State Area */
+struct spu_priv1 {
+       /* Control and Configuration Area */
+       u64 mfc_sr1_RW;                                         /* 0x000 */
+#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK   0x01ull
+#define MFC_STATE1_BUS_TLBIE_MASK              0x02ull
+#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK        0x04ull
+#define MFC_STATE1_PROBLEM_STATE_MASK          0x08ull
+#define MFC_STATE1_RELOCATE_MASK               0x10ull
+#define MFC_STATE1_MASTER_RUN_CONTROL_MASK     0x20ull
+       u64 mfc_lpid_RW;                                        /* 0x008 */
+       u64 spu_idr_RW;                                         /* 0x010 */
+       u64 mfc_vr_RO;                                          /* 0x018 */
+#define MFC_VERSION_BITS               (0xffff << 16)
+#define MFC_REVISION_BITS              (0xffff)
+#define MFC_GET_VERSION_BITS(vr)       (((vr) & MFC_VERSION_BITS) >> 16)
+#define MFC_GET_REVISION_BITS(vr)      ((vr) & MFC_REVISION_BITS)
+       u64 spu_vr_RO;                                          /* 0x020 */
+#define SPU_VERSION_BITS               (0xffff << 16)
+#define SPU_REVISION_BITS              (0xffff)
+#define SPU_GET_VERSION_BITS(vr)       (((vr) & SPU_VERSION_BITS) >> 16)
+#define SPU_GET_REVISION_BITS(vr)      ((vr) & SPU_REVISION_BITS)
+       u8  pad_0x28_0x100[0x100 - 0x28];                       /* 0x28 */
+
+
+       /* Interrupt Area */
+       u64 int_mask_class0_RW;                                 /* 0x100 */
+#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR               0x1L
+#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR         0x2L
+#define CLASS0_ENABLE_SPU_ERROR_INTR                   0x4L
+#define CLASS0_ENABLE_MFC_FIR_INTR                     0x8L
+       u64 int_mask_class1_RW;                                 /* 0x108 */
+#define CLASS1_ENABLE_SEGMENT_FAULT_INTR               0x1L
+#define CLASS1_ENABLE_STORAGE_FAULT_INTR               0x2L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR   0x4L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR   0x8L
+       u64 int_mask_class2_RW;                                 /* 0x110 */
+#define CLASS2_ENABLE_MAILBOX_INTR                     0x1L
+#define CLASS2_ENABLE_SPU_STOP_INTR                    0x2L
+#define CLASS2_ENABLE_SPU_HALT_INTR                    0x4L
+#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR  0x8L
+       u8  pad_0x118_0x140[0x28];                              /* 0x118 */
+       u64 int_stat_class0_RW;                                 /* 0x140 */
+       u64 int_stat_class1_RW;                                 /* 0x148 */
+       u64 int_stat_class2_RW;                                 /* 0x150 */
+       u8  pad_0x158_0x180[0x28];                              /* 0x158 */
+       u64 int_route_RW;                                       /* 0x180 */
+
+       /* Interrupt Routing */
+       u8  pad_0x188_0x200[0x200 - 0x188];                     /* 0x188 */
+
+       /* Atomic Unit Control Area */
+       u64 mfc_atomic_flush_RW;                                /* 0x200 */
+#define mfc_atomic_flush_enable                        0x1L
+       u8  pad_0x208_0x280[0x78];                              /* 0x208 */
+       u64 resource_allocation_groupID_RW;                     /* 0x280 */
+       u64 resource_allocation_enable_RW;                      /* 0x288 */
+       u8  pad_0x290_0x3c8[0x3c8 - 0x290];                     /* 0x290 */
+
+       /* SPU_Cache_ImplRegs: Implementation-dependent cache registers */
+
+       u64 smf_sbi_signal_sel;                                 /* 0x3c8 */
+#define smf_sbi_mask_lsb       56
+#define smf_sbi_shift          (63 - smf_sbi_mask_lsb)
+#define smf_sbi_mask           (0x301LL << smf_sbi_shift)
+#define smf_sbi_bus0_bits      (0x001LL << smf_sbi_shift)
+#define smf_sbi_bus2_bits      (0x100LL << smf_sbi_shift)
+#define smf_sbi2_bus0_bits     (0x201LL << smf_sbi_shift)
+#define smf_sbi2_bus2_bits     (0x300LL << smf_sbi_shift)
+       u64 smf_ato_signal_sel;                                 /* 0x3d0 */
+#define smf_ato_mask_lsb       35
+#define smf_ato_shift          (63 - smf_ato_mask_lsb)
+#define smf_ato_mask           (0x3LL << smf_ato_shift)
+#define smf_ato_bus0_bits      (0x2LL << smf_ato_shift)
+#define smf_ato_bus2_bits      (0x1LL << smf_ato_shift)
+       u8  pad_0x3d8_0x400[0x400 - 0x3d8];                     /* 0x3d8 */
+
+       /* TLB Management Registers */
+       u64 mfc_sdr_RW;                                         /* 0x400 */
+       u8  pad_0x408_0x500[0xf8];                              /* 0x408 */
+       u64 tlb_index_hint_RO;                                  /* 0x500 */
+       u64 tlb_index_W;                                        /* 0x508 */
+       u64 tlb_vpn_RW;                                         /* 0x510 */
+       u64 tlb_rpn_RW;                                         /* 0x518 */
+       u8  pad_0x520_0x540[0x20];                              /* 0x520 */
+       u64 tlb_invalidate_entry_W;                             /* 0x540 */
+       u64 tlb_invalidate_all_W;                               /* 0x548 */
+       u8  pad_0x550_0x580[0x580 - 0x550];                     /* 0x550 */
+
+       /* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
+       u64 smm_hid;                                            /* 0x580 */
+#define PAGE_SIZE_MASK         0xf000000000000000ull
+#define PAGE_SIZE_16MB_64KB    0x2000000000000000ull
+       u8  pad_0x588_0x600[0x600 - 0x588];                     /* 0x588 */
+
+       /* MFC Status/Control Area */
+       u64 mfc_accr_RW;                                        /* 0x600 */
+#define MFC_ACCR_EA_ACCESS_GET         (1 << 0)
+#define MFC_ACCR_EA_ACCESS_PUT         (1 << 1)
+#define MFC_ACCR_LS_ACCESS_GET         (1 << 3)
+#define MFC_ACCR_LS_ACCESS_PUT         (1 << 4)
+       u8  pad_0x608_0x610[0x8];                               /* 0x608 */
+       u64 mfc_dsisr_RW;                                       /* 0x610 */
+#define MFC_DSISR_PTE_NOT_FOUND                (1 << 30)
+#define MFC_DSISR_ACCESS_DENIED                (1 << 27)
+#define MFC_DSISR_ATOMIC               (1 << 26)
+#define MFC_DSISR_ACCESS_PUT           (1 << 25)
+#define MFC_DSISR_ADDR_MATCH           (1 << 22)
+#define MFC_DSISR_LS                   (1 << 17)
+#define MFC_DSISR_L                    (1 << 16)
+#define MFC_DSISR_ADDRESS_OVERFLOW     (1 << 0)
+       u8  pad_0x618_0x620[0x8];                               /* 0x618 */
+       u64 mfc_dar_RW;                                         /* 0x620 */
+       u8  pad_0x628_0x700[0x700 - 0x628];                     /* 0x628 */
+
+       /* Replacement Management Table (RMT) Area */
+       u64 rmt_index_RW;                                       /* 0x700 */
+       u8  pad_0x708_0x710[0x8];                               /* 0x708 */
+       u64 rmt_data1_RW;                                       /* 0x710 */
+       u8  pad_0x718_0x800[0x800 - 0x718];                     /* 0x718 */
+
+       /* Control/Configuration Registers */
+       u64 mfc_dsir_R;                                         /* 0x800 */
+#define MFC_DSIR_Q                     (1 << 31)
+#define MFC_DSIR_SPU_QUEUE             MFC_DSIR_Q
+       u64 mfc_lsacr_RW;                                       /* 0x808 */
+#define MFC_LSACR_COMPARE_MASK         ((~0ull) << 32)
+#define MFC_LSACR_COMPARE_ADDR         ((~0ull) >> 32)
+       u64 mfc_lscrr_R;                                        /* 0x810 */
+#define MFC_LSCRR_Q                    (1 << 31)
+#define MFC_LSCRR_SPU_QUEUE            MFC_LSCRR_Q
+#define MFC_LSCRR_QI_SHIFT             32
+#define MFC_LSCRR_QI_MASK              ((~0ull) << MFC_LSCRR_QI_SHIFT)
+       u8  pad_0x818_0x820[0x8];                               /* 0x818 */
+       u64 mfc_tclass_id_RW;                                   /* 0x820 */
+#define MFC_TCLASS_ID_ENABLE           (1L << 0L)
+#define MFC_TCLASS_SLOT2_ENABLE                (1L << 5L)
+#define MFC_TCLASS_SLOT1_ENABLE                (1L << 6L)
+#define MFC_TCLASS_SLOT0_ENABLE                (1L << 7L)
+#define MFC_TCLASS_QUOTA_2_SHIFT       8L
+#define MFC_TCLASS_QUOTA_1_SHIFT       16L
+#define MFC_TCLASS_QUOTA_0_SHIFT       24L
+#define MFC_TCLASS_QUOTA_2_MASK                (0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
+#define MFC_TCLASS_QUOTA_1_MASK                (0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
+#define MFC_TCLASS_QUOTA_0_MASK                (0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
+       u8  pad_0x828_0x900[0x900 - 0x828];                     /* 0x828 */
+
+       /* Real Mode Support Registers */
+       u64 mfc_rm_boundary;                                    /* 0x900 */
+       u8  pad_0x908_0x938[0x30];                              /* 0x908 */
+       u64 smf_dma_signal_sel;                                 /* 0x938 */
+#define mfc_dma1_mask_lsb      41
+#define mfc_dma1_shift         (63 - mfc_dma1_mask_lsb)
+#define mfc_dma1_mask          (0x3LL << mfc_dma1_shift)
+#define mfc_dma1_bits          (0x1LL << mfc_dma1_shift)
+#define mfc_dma2_mask_lsb      43
+#define mfc_dma2_shift         (63 - mfc_dma2_mask_lsb)
+#define mfc_dma2_mask          (0x3LL << mfc_dma2_shift)
+#define mfc_dma2_bits          (0x1LL << mfc_dma2_shift)
+       u8  pad_0x940_0xa38[0xf8];                              /* 0x940 */
+       u64 smm_signal_sel;                                     /* 0xa38 */
+#define smm_sig_mask_lsb       12
+#define smm_sig_shift          (63 - smm_sig_mask_lsb)
+#define smm_sig_mask           (0x3LL << smm_sig_shift)
+#define smm_sig_bus0_bits      (0x2LL << smm_sig_shift)
+#define smm_sig_bus2_bits      (0x1LL << smm_sig_shift)
+       u8  pad_0xa40_0xc00[0xc00 - 0xa40];                     /* 0xa40 */
+
+       /* DMA Command Error Area */
+       u64 mfc_cer_R;                                          /* 0xc00 */
+#define MFC_CER_Q              (1 << 31)
+#define MFC_CER_SPU_QUEUE      MFC_CER_Q
+       u8  pad_0xc08_0x1000[0x1000 - 0xc08];                   /* 0xc08 */
+
+       /* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
+       /* DMA Command Error Area */
+       u64 spu_ecc_cntl_RW;                                    /* 0x1000 */
+#define SPU_ECC_CNTL_E                 (1ull << 0ull)
+#define SPU_ECC_CNTL_ENABLE            SPU_ECC_CNTL_E
+#define SPU_ECC_CNTL_DISABLE           (~SPU_ECC_CNTL_E & 1L)
+#define SPU_ECC_CNTL_S                 (1ull << 1ull)
+#define SPU_ECC_STOP_AFTER_ERROR       SPU_ECC_CNTL_S
+#define SPU_ECC_CONTINUE_AFTER_ERROR   (~SPU_ECC_CNTL_S & 2L)
+#define SPU_ECC_CNTL_B                 (1ull << 2ull)
+#define SPU_ECC_BACKGROUND_ENABLE      SPU_ECC_CNTL_B
+#define SPU_ECC_BACKGROUND_DISABLE     (~SPU_ECC_CNTL_B & 4L)
+#define SPU_ECC_CNTL_I_SHIFT           3ull
+#define SPU_ECC_CNTL_I_MASK            (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_ALWAYS           (~SPU_ECC_CNTL_I & 12L)
+#define SPU_ECC_WRITE_CORRECTABLE      (1ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_UNCORRECTABLE    (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_CNTL_D                 (1ull << 5ull)
+#define SPU_ECC_DETECTION_ENABLE       SPU_ECC_CNTL_D
+#define SPU_ECC_DETECTION_DISABLE      (~SPU_ECC_CNTL_D & 32L)
+       u64 spu_ecc_stat_RW;                                    /* 0x1008 */
+#define SPU_ECC_CORRECTED_ERROR                (1ull << 0ul)
+#define SPU_ECC_UNCORRECTED_ERROR      (1ull << 1ul)
+#define SPU_ECC_SCRUB_COMPLETE         (1ull << 2ul)
+#define SPU_ECC_SCRUB_IN_PROGRESS      (1ull << 3ul)
+#define SPU_ECC_INSTRUCTION_ERROR      (1ull << 4ul)
+#define SPU_ECC_DATA_ERROR             (1ull << 5ul)
+#define SPU_ECC_DMA_ERROR              (1ull << 6ul)
+#define SPU_ECC_STATUS_CNT_MASK                (256ull << 8)
+       u64 spu_ecc_addr_RW;                                    /* 0x1010 */
+       u64 spu_err_mask_RW;                                    /* 0x1018 */
+#define SPU_ERR_ILLEGAL_INSTR          (1ull << 0ul)
+#define SPU_ERR_ILLEGAL_CHANNEL                (1ull << 1ul)
+       u8  pad_0x1020_0x1028[0x1028 - 0x1020];                 /* 0x1020 */
+
+       /* SPU Debug-Trace Bus (DTB) Selection Registers */
+       u64 spu_trig0_sel;                                      /* 0x1028 */
+       u64 spu_trig1_sel;                                      /* 0x1030 */
+       u64 spu_trig2_sel;                                      /* 0x1038 */
+       u64 spu_trig3_sel;                                      /* 0x1040 */
+       u64 spu_trace_sel;                                      /* 0x1048 */
+#define spu_trace_sel_mask             0x1f1fLL
+#define spu_trace_sel_bus0_bits                0x1000LL
+#define spu_trace_sel_bus2_bits                0x0010LL
+       u64 spu_event0_sel;                                     /* 0x1050 */
+       u64 spu_event1_sel;                                     /* 0x1058 */
+       u64 spu_event2_sel;                                     /* 0x1060 */
+       u64 spu_event3_sel;                                     /* 0x1068 */
+       u64 spu_trace_cntl;                                     /* 0x1070 */
+} __attribute__ ((aligned(0x2000)));
+
+#endif
index 0991dfceef1df98979a45e5431107190752227a7..9606349855dacc074984ce6dbdeea8f1e6c69863 100644 (file)
 #define __NR_inotify_init      275
 #define __NR_inotify_add_watch 276
 #define __NR_inotify_rm_watch  277
+#define __NR_spu_run           278
+#define __NR_spu_create                279
 
-#define __NR_syscalls          278
+#define __NR_syscalls          280
 
index c7007b1db91d6beece5fd0143c0f519304e629c4..44fdd48d38e6ca75c5623020af113e837b3f9cc5 100644 (file)
@@ -512,4 +512,9 @@ asmlinkage long sys_ioprio_get(int which, int who);
 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
                                        unsigned long maxnode);
 
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
+                                __u32 __user *ustatus);
+asmlinkage long sys_spu_create(const char __user *name,
+               unsigned int flags, mode_t mode);
+
 #endif
index 1ab2370e2efaee04f62334ae98a778ed3bbf9398..d4739a475d23045fe8ae79de106595f642255532 100644 (file)
@@ -90,3 +90,5 @@ cond_syscall(sys_pciconfig_iobase);
 cond_syscall(sys32_ipc);
 cond_syscall(sys32_sysctl);
 cond_syscall(ppc_rtas);
+cond_syscall(sys_spu_run);
+cond_syscall(sys_spu_create);
index 7197f9bcd384d99bdd51d858cb8438a67a4198a0..3944fec380125b8698e47468e5b0ea4d7407060a 100644 (file)
@@ -2267,6 +2267,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
 
+EXPORT_SYMBOL_GPL(__handle_mm_fault);
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.