Diffstat (limited to 'docs/doxygen/other')
-rwxr-xr-x  docs/doxygen/other/doxypy.py               414
-rw-r--r--  docs/doxygen/other/extra_pages.dox         326
-rw-r--r--  docs/doxygen/other/group_defs.dox           68
-rw-r--r--  docs/doxygen/other/main_page.dox           405
-rw-r--r--  docs/doxygen/other/metadata.dox            330
-rw-r--r--  docs/doxygen/other/msg_passing.dox         269
-rw-r--r--  docs/doxygen/other/pfb_intro.dox            96
-rw-r--r--  docs/doxygen/other/pmt.dox                 348
-rwxr-xr-x  docs/doxygen/other/shared_ptr_docstub.h     24
-rw-r--r--  docs/doxygen/other/thread_affinity.dox      65
-rw-r--r--  docs/doxygen/other/tv-channel-frequencies   79
-rw-r--r--  docs/doxygen/other/vector_docstub.h         16
12 files changed, 2440 insertions, 0 deletions
diff --git a/docs/doxygen/other/doxypy.py b/docs/doxygen/other/doxypy.py
new file mode 100755
index 000000000..15d3e0824
--- /dev/null
+++ b/docs/doxygen/other/doxypy.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+
+__applicationName__ = "doxypy"
+__blurb__ = """
+doxypy is an input filter for Doxygen. It preprocesses python
+files so that docstrings of classes and functions are reformatted
+into Doxygen-conform documentation blocks.
+"""
+
+__doc__ = __blurb__ + \
+"""
+In order to make Doxygen preprocess files through doxypy, simply
+add the following lines to your Doxyfile:
+ FILTER_SOURCE_FILES = YES
+ INPUT_FILTER = "python /path/to/doxypy.py"
+"""
+
+__version__ = "0.4.1"
+__date__ = "5th December 2008"
+__website__ = "http://code.foosel.org/doxypy"
+
+__author__ = (
+ "Philippe 'demod' Neumann (doxypy at demod dot org)",
+ "Gina 'foosel' Haeussge (gina at foosel dot net)"
+)
+
+__licenseName__ = "GPL v2"
+__license__ = """This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import sys
+import re
+
+from optparse import OptionParser, OptionGroup
+
+class FSM(object):
+ """Implements a finite state machine.
+
+ Transitions are given as 4-tuples, consisting of an origin state, a target
+ state, a condition for the transition (given as a reference to a function
+ which gets called with a given piece of input) and a pointer to a function
+ to be called upon the execution of the given transition.
+ """
+
+ """
+ @var transitions holds the transitions
+ @var current_state holds the current state
+ @var current_input holds the current input
+ @var current_transition hold the currently active transition
+ """
+
+ def __init__(self, start_state=None, transitions=[]):
+ self.transitions = transitions
+ self.current_state = start_state
+ self.current_input = None
+ self.current_transition = None
+
+ def setStartState(self, state):
+ self.current_state = state
+
+ def addTransition(self, from_state, to_state, condition, callback):
+ self.transitions.append([from_state, to_state, condition, callback])
+
+ def makeTransition(self, input):
+ """ Makes a transition based on the given input.
+
+ @param input input to parse by the FSM
+ """
+ for transition in self.transitions:
+ [from_state, to_state, condition, callback] = transition
+ if from_state == self.current_state:
+ match = condition(input)
+ if match:
+ self.current_state = to_state
+ self.current_input = input
+ self.current_transition = transition
+ if options.debug:
+ print >>sys.stderr, "# FSM: executing (%s -> %s) for line '%s'" % (from_state, to_state, input)
+ callback(match)
+ return
+
+class Doxypy(object):
+ def __init__(self):
+ string_prefixes = "[uU]?[rR]?"
+
+ self.start_single_comment_re = re.compile("^\s*%s(''')" % string_prefixes)
+ self.end_single_comment_re = re.compile("(''')\s*$")
+
+ self.start_double_comment_re = re.compile("^\s*%s(\"\"\")" % string_prefixes)
+ self.end_double_comment_re = re.compile("(\"\"\")\s*$")
+
+ self.single_comment_re = re.compile("^\s*%s(''').*(''')\s*$" % string_prefixes)
+ self.double_comment_re = re.compile("^\s*%s(\"\"\").*(\"\"\")\s*$" % string_prefixes)
+
+ self.defclass_re = re.compile("^(\s*)(def .+:|class .+:)")
+ self.empty_re = re.compile("^\s*$")
+ self.hashline_re = re.compile("^\s*#.*$")
+ self.importline_re = re.compile("^\s*(import |from .+ import)")
+
+ self.multiline_defclass_start_re = re.compile("^(\s*)(def|class)(\s.*)?$")
+ self.multiline_defclass_end_re = re.compile(":\s*$")
+
+ ## Transition list format
+ # ["FROM", "TO", condition, action]
+ transitions = [
+ ### FILEHEAD
+
+ # single line comments
+ ["FILEHEAD", "FILEHEAD", self.single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD", "FILEHEAD", self.double_comment_re.search, self.appendCommentLine],
+
+ # multiline comments
+ ["FILEHEAD", "FILEHEAD_COMMENT_SINGLE", self.start_single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD", self.end_single_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD_COMMENT_SINGLE", self.catchall, self.appendCommentLine],
+ ["FILEHEAD", "FILEHEAD_COMMENT_DOUBLE", self.start_double_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD", self.end_double_comment_re.search, self.appendCommentLine],
+ ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD_COMMENT_DOUBLE", self.catchall, self.appendCommentLine],
+
+ # other lines
+ ["FILEHEAD", "FILEHEAD", self.empty_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "FILEHEAD", self.hashline_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "FILEHEAD", self.importline_re.search, self.appendFileheadLine],
+ ["FILEHEAD", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
+ ["FILEHEAD", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.resetCommentSearch],
+ ["FILEHEAD", "DEFCLASS_BODY", self.catchall, self.appendFileheadLine],
+
+ ### DEFCLASS
+
+ # single line comments
+ ["DEFCLASS", "DEFCLASS_BODY", self.single_comment_re.search, self.appendCommentLine],
+ ["DEFCLASS", "DEFCLASS_BODY", self.double_comment_re.search, self.appendCommentLine],
+
+ # multiline comments
+ ["DEFCLASS", "COMMENT_SINGLE", self.start_single_comment_re.search, self.appendCommentLine],
+ ["COMMENT_SINGLE", "DEFCLASS_BODY", self.end_single_comment_re.search, self.appendCommentLine],
+ ["COMMENT_SINGLE", "COMMENT_SINGLE", self.catchall, self.appendCommentLine],
+ ["DEFCLASS", "COMMENT_DOUBLE", self.start_double_comment_re.search, self.appendCommentLine],
+ ["COMMENT_DOUBLE", "DEFCLASS_BODY", self.end_double_comment_re.search, self.appendCommentLine],
+ ["COMMENT_DOUBLE", "COMMENT_DOUBLE", self.catchall, self.appendCommentLine],
+
+ # other lines
+ ["DEFCLASS", "DEFCLASS", self.empty_re.search, self.appendDefclassLine],
+ ["DEFCLASS", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
+ ["DEFCLASS", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.resetCommentSearch],
+ ["DEFCLASS", "DEFCLASS_BODY", self.catchall, self.stopCommentSearch],
+
+ ### DEFCLASS_BODY
+
+ ["DEFCLASS_BODY", "DEFCLASS", self.defclass_re.search, self.startCommentSearch],
+ ["DEFCLASS_BODY", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.startCommentSearch],
+ ["DEFCLASS_BODY", "DEFCLASS_BODY", self.catchall, self.appendNormalLine],
+
+ ### DEFCLASS_MULTI
+ ["DEFCLASS_MULTI", "DEFCLASS", self.multiline_defclass_end_re.search, self.appendDefclassLine],
+ ["DEFCLASS_MULTI", "DEFCLASS_MULTI", self.catchall, self.appendDefclassLine],
+ ]
+
+ self.fsm = FSM("FILEHEAD", transitions)
+ self.outstream = sys.stdout
+
+ self.output = []
+ self.comment = []
+ self.filehead = []
+ self.defclass = []
+ self.indent = ""
+
+ def __closeComment(self):
+ """Appends any open comment block and triggering block to the output."""
+
+ if options.autobrief:
+ if len(self.comment) == 1 \
+ or (len(self.comment) > 2 and self.comment[1].strip() == ''):
+ self.comment[0] = self.__docstringSummaryToBrief(self.comment[0])
+
+ if self.comment:
+ block = self.makeCommentBlock()
+ self.output.extend(block)
+
+ if self.defclass:
+ self.output.extend(self.defclass)
+
+ def __docstringSummaryToBrief(self, line):
+ """Adds \\brief to the docstrings summary line.
+
+ A \\brief is prepended, provided no other doxygen command is at the
+ start of the line.
+ """
+ stripped = line.strip()
+ if stripped and not stripped[0] in ('@', '\\'):
+ return "\\brief " + line
+ else:
+ return line
+
+ def __flushBuffer(self):
+ """Flushes the current outputbuffer to the outstream."""
+ if self.output:
+ try:
+ if options.debug:
+ print >>sys.stderr, "# OUTPUT: ", self.output
+ print >>self.outstream, "\n".join(self.output)
+ self.outstream.flush()
+ except IOError:
+ # Fix for FS#33. Catches "broken pipe" when doxygen closes
+ # stdout prematurely upon usage of INPUT_FILTER, INLINE_SOURCES
+ # and FILTER_SOURCE_FILES.
+ pass
+ self.output = []
+
+ def catchall(self, input):
+ """The catchall-condition, always returns true."""
+ return True
+
+ def resetCommentSearch(self, match):
+ """Restarts a new comment search for a different triggering line.
+
+ Closes the current commentblock and starts a new comment search.
+ """
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: resetCommentSearch"
+ self.__closeComment()
+ self.startCommentSearch(match)
+
+ def startCommentSearch(self, match):
+ """Starts a new comment search.
+
+ Saves the triggering line, resets the current comment and saves
+ the current indentation.
+ """
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: startCommentSearch"
+ self.defclass = [self.fsm.current_input]
+ self.comment = []
+ self.indent = match.group(1)
+
+ def stopCommentSearch(self, match):
+ """Stops a comment search.
+
+ Closes the current commentblock, resets the triggering line and
+ appends the current line to the output.
+ """
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: stopCommentSearch"
+ self.__closeComment()
+
+ self.defclass = []
+ self.output.append(self.fsm.current_input)
+
+ def appendFileheadLine(self, match):
+ """Appends a line in the FILEHEAD state.
+
+ Closes the open comment block, resets it and appends the current line.
+ """
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: appendFileheadLine"
+ self.__closeComment()
+ self.comment = []
+ self.output.append(self.fsm.current_input)
+
+ def appendCommentLine(self, match):
+ """Appends a comment line.
+
+ The comment delimiter is removed from multiline starts and ends, as
+ well as from single-line comments.
+ """
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: appendCommentLine"
+ (from_state, to_state, condition, callback) = self.fsm.current_transition
+
+ # single line comment
+ if (from_state == "DEFCLASS" and to_state == "DEFCLASS_BODY") \
+ or (from_state == "FILEHEAD" and to_state == "FILEHEAD"):
+ # remove comment delimiter from begin and end of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[line.find(activeCommentDelim)+len(activeCommentDelim):line.rfind(activeCommentDelim)])
+
+ if (to_state == "DEFCLASS_BODY"):
+ self.__closeComment()
+ self.defclass = []
+ # multiline start
+ elif from_state == "DEFCLASS" or from_state == "FILEHEAD":
+ # remove comment delimiter from begin of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[line.find(activeCommentDelim)+len(activeCommentDelim):])
+ # multiline end
+ elif to_state == "DEFCLASS_BODY" or to_state == "FILEHEAD":
+ # remove comment delimiter from end of the line
+ activeCommentDelim = match.group(1)
+ line = self.fsm.current_input
+ self.comment.append(line[0:line.rfind(activeCommentDelim)])
+ if (to_state == "DEFCLASS_BODY"):
+ self.__closeComment()
+ self.defclass = []
+ # in multiline comment
+ else:
+ # just append the comment line
+ self.comment.append(self.fsm.current_input)
+
+ def appendNormalLine(self, match):
+ """Appends a line to the output."""
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: appendNormalLine"
+ self.output.append(self.fsm.current_input)
+
+ def appendDefclassLine(self, match):
+ """Appends a line to the triggering block."""
+ if options.debug:
+ print >>sys.stderr, "# CALLBACK: appendDefclassLine"
+ self.defclass.append(self.fsm.current_input)
+
+ def makeCommentBlock(self):
+ """Indents the current comment block with respect to the current
+ indentation level.
+
+ @returns a list of indented comment lines
+ """
+ doxyStart = "##"
+ commentLines = self.comment
+
+ commentLines = map(lambda x: "%s# %s" % (self.indent, x), commentLines)
+ l = [self.indent + doxyStart]
+ l.extend(commentLines)
+
+ return l
+
+ def parse(self, input):
+ """Parses a python file given as input string and returns the doxygen-
+ compatible representation.
+
+ @param input the python code to parse
+ @returns the modified python code
+ """
+ lines = input.split("\n")
+
+ for line in lines:
+ self.fsm.makeTransition(line)
+
+ if self.fsm.current_state == "DEFCLASS":
+ self.__closeComment()
+
+ return "\n".join(self.output)
+
+ def parseFile(self, filename):
+ """Parses a python file given as input string and returns the doxygen-
+ compatible representation.
+
+ @param input the python code to parse
+ @returns the modified python code
+ """
+ f = open(filename, 'r')
+
+ for line in f:
+ self.parseLine(line.rstrip('\r\n'))
+ if self.fsm.current_state == "DEFCLASS":
+ self.__closeComment()
+ self.__flushBuffer()
+ f.close()
+
+ def parseLine(self, line):
+ """Parse one line of python and flush the resulting output to the
+ outstream.
+
+ @param line the python code line to parse
+ """
+ self.fsm.makeTransition(line)
+ self.__flushBuffer()
+
+def optParse():
+ """Parses commandline options."""
+ parser = OptionParser(prog=__applicationName__, version="%prog " + __version__)
+
+ parser.set_usage("%prog [options] filename")
+ parser.add_option("--autobrief",
+ action="store_true", dest="autobrief",
+ help="use the docstring summary line as \\brief description"
+ )
+ parser.add_option("--debug",
+ action="store_true", dest="debug",
+ help="enable debug output on stderr"
+ )
+
+ ## parse options
+ global options
+ (options, filename) = parser.parse_args()
+
+ if not filename:
+ print >>sys.stderr, "No filename given."
+ sys.exit(-1)
+
+ return filename[0]
+
+def main():
+ """Starts the parser on the file given by the filename as the first
+ argument on the commandline.
+ """
+ filename = optParse()
+ fsm = Doxypy()
+ fsm.parseFile(filename)
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/doxygen/other/extra_pages.dox b/docs/doxygen/other/extra_pages.dox
new file mode 100644
index 000000000..d40c692e0
--- /dev/null
+++ b/docs/doxygen/other/extra_pages.dox
@@ -0,0 +1,326 @@
+/*! \page build_guide Build Instructions and Information
+
+\section dependencies Dependencies
+
+The list of GNU Radio dependencies and the minimum required versions,
+if any, to build the various GNU Radio components.
+
+Most of these components do not need to be individually compiled or
+installed. Instead, rely on your operating system's package manager or
+binary installation process (the <b>apt-get</b> system in Debian and
+Ubuntu, <b>yum</b> in RedHat and Fedora, etc.). GNU Radio tries to keep an
+up-to-date build guide for the majority of the supported operating
+systems on gnuradio.org
+(http://gnuradio.org/redmine/projects/gnuradio/wiki/BuildGuide).
+
+Not all dependencies are required for all components, and not all
+components are required for a given installation. The list of required
+components is determined by what the user requires from GNU Radio. If,
+for example, you do not use any Comedi-based hardware, do not worry
+about building gr-comedi.
+
+Before trying to build these from source, please try your system's
+installation tool (apt-get, pkg_install, YaST, yum, urpmi, etc.)
+first. Most recent systems have these packages available.
+
+\subsection dep_global Global Dependencies
+\li git http://code.google.com/p/msysgit
+\li cmake (>= 2.6) http://www.cmake.org/cmake/resources/software.html
+\li boost (>= 1.35) http://www.boostpro.com/download
+\li cppunit (>= 1.9.14) http://gaiacrtn.free.fr/cppunit/index.html
+\li fftw3f (>= 3.0) http://www.fftw.org/install/windows.html
+
+\subsection dep_python Python Wrappers
+\li python (>= 2.5) http://www.python.org/download/
+\li swig (>= 1.3.31) http://www.swig.org/download.html
+\li numpy (>= 1.1.0) http://sourceforge.net/projects/numpy/files/NumPy/
+
+\subsection dep_docs docs: Building the documentation
+\li doxygen (>= 1.5) http://www.stack.nl/~dimitri/doxygen/download.html
+
+\subsection dep_grc grc: The GNU Radio Companion
+\li Cheetah (>= 2.0) http://www.cheetahtemplate.org/
+\li pygtk (>= 2.10) http://www.pygtk.org/downloads.html
+
+\subsection dep_wavelet gr-wavelet: Collection of wavelet blocks
+\li gsl (>= 1.10) http://gnuwin32.sourceforge.net/packages/gsl.htm
+
+\subsection dep_gr_qtgui gr-qtgui: The QT-based Graphical User Interface
+\li qt (>= 4.4) http://qt.nokia.com/downloads/
+\li qwt (>= 5.2) http://sourceforge.net/projects/qwt/
+\li pyqt (>= 4.4) http://www.riverbankcomputing.co.uk/software/pyqt/download
+\li pyqwt (>= 5.2) http://pyqwt.sourceforge.net/download.html
+
+\subsection dep_gr_wxgui gr-wxgui: The WX-based Graphical User Interface
+\li wxpython (>= 2.8) http://www.wxpython.org/
+\li python-lxml (>= 1.3.6) http://lxml.de/
+
+\subsection dep_gr_audio gr-audio: Audio Subsystems (system/OS dependent)
+\li audio-alsa (>= 0.9) http://www.alsa-project.org
+\li audio-jack (>= 0.8) http://jackaudio.org/
+\li portaudio (>= 19) http://www.portaudio.com/
+\li audio-oss (>= 1.0) http://www.opensound.com/oss.html
+\li audio-osx
+\li audio-windows
+
+It is not necessary to satisfy all of these dependencies; just the
+one(s) that are right for your system. On Linux, don't expect
+audio-osx and audio-windows to be either satisfied or built.
+
+\subsection dep_uhd uhd: The Ettus USRP Hardware Driver Interface
+\li uhd (>= 3.0.0) http://code.ettus.com/redmine/ettus/projects/uhd/wiki
+
+\subsection dep_shd shd: The Symplex Hardware Driver Interface
+\li shd (>= 3.0.0)
+
+\subsection dep_gr_video_sdl gr-video-sdl: PAL and NTSC display
+\li SDL (>= 1.2.0) http://www.libsdl.org/download-1.2.php
+
+\subsection dep_gr_comedi gr-comedi: Comedi hardware interface
+\li comedilib (>= 0.8) http://www.comedi.org/
+
+
+
+\section build_gr_cmake Building GNU Radio
+
+GNU Radio is built using the Cmake build system
+(http://www.cmake.org/). The standard build method is as follows:
+
+\code
+$ mkdir $(builddir)
+$ cd $(builddir)
+$ cmake [OPTIONS] $(srcdir)
+$ make
+$ make test
+$ sudo make install
+\endcode
+
+The \$(builddir) is the directory in which the code is built. This
+<b>cannot</b> be the same path as where the source code resides. Often,
+\$(builddir) is \$(srcdir)/build.
+
+\subsection cmake_options Cmake Options
+
+Options can be used to specify where to find various library or
+include file dependencies that are not automatically being found
+(-DCMAKE_PREFIX_PATH) or set the prefix
+(-DCMAKE_INSTALL_PREFIX=(dir)).
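+
+For example, a typical invocation that sets a custom install prefix and
+an extra search path might look like the following (the paths here are
+only placeholders):
+
+\code
+cmake -DCMAKE_INSTALL_PREFIX=/opt/gnuradio -DCMAKE_PREFIX_PATH=/opt/gnuradio <srcdir>
+\endcode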
+
+Components can also be enabled and disabled through the options. For a
+component named *gr-comp*, the option to disable would look like:
+-DENABLE_GR_COMP=off. The "off" could also be "false" or "no", and
+cmake is not case sensitive about these options. Similarly, "true",
+"on", or "yes" will turn this component on. All components are enabled
+by default.
+
+For example, -DENABLE_PYTHON=False turns off building any Python or
+SWIG components. The result will be the GNU Radio libraries and
+C++ programs/applications/examples; no Python or GRC files will be
+built or installed.
+
+The option -DENABLE_DEFAULT=False can be used to disable all
+components. Individual components can then be selectively turned back
+on. For example, building just the Volk and Gruel libraries can be
+done with this:
+
+\code
+cmake -DENABLE_DEFAULT=Off -DENABLE_VOLK=True -DENABLE_GRUEL=True <srcdir>
+\endcode
+
+
+The build type allows you to specify the build as a debug or release
+version. Each type sets different flags for different purposes. To set
+the build type, use:
+
+\code
+-DCMAKE_BUILD_TYPE="Release"|"Debug"
+\endcode
+
+If not specified, the build defaults to "Release" mode.
+
+"Release" mode sets the '-O3' optimization flag.
+
+"Debug" mode sets '-g -O2' flags to export debug symbols and reduce
+the optimization to make the libraries easier to debug and step
+through.
+
+
+\subsection build_gr_cmake_e100 Building for the E100
+
+To build GNU Radio on the Ettus Research E100 embedded platforms,
+Cmake has to know that the processor uses the NEON extensions. Use
+the following:
+
+\code
+cmake -DCMAKE_CXX_FLAGS:STRING="-mcpu=cortex-a8 -mfpu=neon -mfloat-abi=softfp -g" \
+ -DCMAKE_C_FLAGS:STRING="-mcpu=cortex-a8 -mfpu=neon -mfloat-abi=softfp -g" \
+ <gr_source_dir>
+\endcode
+
+*/
+
+
+
+/*! \page volk_guide Instructions for using Volk in GNU Radio
+
+\section volk_intro Introduction
+
+Volk is the Vector-Optimized Library of Kernels. It is a library that
+contains kernels of hand-written SIMD code for different mathematical
+operations. Since each SIMD architecture can be greatly different and
+no compiler has yet come along to handle vectorization properly or
+highly efficiently, Volk approaches the problem differently. For each
+architecture or platform that a developer wishes to vectorize for, a
+new proto-kernel is added to Volk. At runtime, Volk will select the
+correct proto-kernel. In this way, users of Volk call a kernel to
+perform the operation in a platform/architecture-agnostic way. This
+allows us to write portable SIMD code.
+
+Volk kernels are always defined with a 'generic' proto-kernel, which
+is written in plain C. With the generic kernel, the kernel becomes
+portable to any platform. Kernels are then extended by adding
+proto-kernels for new platforms in which they are desired.
+
+A good example of a Volk kernel with multiple proto-kernels defined is
+the volk_32f_s32f_multiply_32f_a. This kernel implements a scalar
+multiplication of a vector of floating point numbers (each item in the
+vector is multiplied by the same value). This kernel has the following
+proto-kernels that are defined for 'generic,' 'avx,' 'sse,' and 'orc.'
+
+\code
+ void volk_32f_s32f_multiply_32f_a_generic
+ void volk_32f_s32f_multiply_32f_a_sse
+ void volk_32f_s32f_multiply_32f_a_avx
+ void volk_32f_s32f_multiply_32f_a_orc
+\endcode
+
+These proto-kernels mean that on platforms with AVX support, Volk can
+select this option or the SSE option, depending on which is faster. On
+other platforms, the ORC SIMD compiler might provide a solution. If
+all else fails, Volk can fall back on the generic proto-kernel, which
+will always work.
+
+A note on ORC: ORC is a SIMD compiler library that uses a generic
+assembly-like language for SIMD commands. Based on the available SIMD
+architecture of a system, it will try to compile a good
+solution. Tests show that the results of ORC proto-kernels are
+generally better than the generic versions but often not as good as
+the hand-tuned proto-kernels for a specific SIMD architecture. This
+is, of course, to be expected, and ORC provides a nice intermediary
+step to performance improvements until a specific hand-tuned
+proto-kernel can be made for a given platform.
+
+See <a
+href="http://gnuradio.org/redmine/projects/gnuradio/wiki/Volk">Volk on
+gnuradio.org</a> for details on the Volk naming scheme.
+
+
+\section volk_alignment Setting and Using Memory Alignment Information
+
+For Volk to work as well as possible, we want to use memory-aligned
+SIMD calls, which means we have to have some way of knowing and
+controlling the alignment of the buffers passed to gr_block's work
+function. We set the alignment requirement for SIMD aligned memory
+calls with:
+
+\code
+ const int alignment_multiple =
+ volk_get_alignment() / output_item_size;
+ set_alignment(std::max(1,alignment_multiple));
+\endcode
+
+The Volk function 'volk_get_alignment' provides the alignment of
+the machine architecture. We then base the alignment on the number of
+output items required to maintain the alignment, so we divide the
+number of alignment bytes by the number of bytes in an output item
+(sizeof(float), sizeof(gr_complex), etc.). This value is then set per
+block with the 'set_alignment' function.
+
+Because the scheduler tries to optimize throughput, the number of
+items available per call to work will change and depends on the
+availability of the read and write buffers. This means that it
+sometimes cannot produce a buffer that is properly memory
+aligned. This is an inevitable consequence of the scheduler
+system. Instead of requiring alignment, the scheduler enforces the
+alignment as much as possible, and when a buffer becomes unaligned,
+the scheduler will work to correct it as much as possible. If a
+block's buffers are unaligned, then the scheduler sets a flag to
+indicate as much so that the block can then decide what best to
+do. The next section discusses the use of the aligned/unaligned
+information in a gr_block's work function.
+
+
+\section volk_work Using Alignment Properties in Work()
+
+The buffers passed to work/general_work in a gr_block are not
+guaranteed to be aligned, but they will mostly be aligned whenever
+possible. When not aligned, the 'is_unaligned()' flag will be set. So
+a block can know if its buffers are aligned and make the right
+decisions. This looks like:
+
+\code
+int
+gr_some_block::work (int noutput_items,
+ gr_vector_const_void_star &input_items,
+ gr_vector_void_star &output_items)
+{
+ const float *in = (const float *) input_items[0];
+ float *out = (float *) output_items[0];
+
+ if(is_unaligned()) {
+ // do something with unaligned data. This can either be a manual
+ // handling of the items or a call to an unaligned Volk function.
+ volk_32f_something_32f_u(out, in, noutput_items);
+ }
+ else {
+ // Buffers are aligned; can call the aligned Volk function.
+ volk_32f_something_32f_a(out, in, noutput_items);
+ }
+
+ return noutput_items;
+}
+\endcode
+
+
+
+\section volk_tuning Tuning Volk Performance
+
+VOLK comes with a profiler that will build a config file for the best
+SIMD architecture for your processor. Run volk_profile, which is
+installed into $PREFIX/bin. This program tests all known VOLK kernels
+for each architecture supported by the processor. When finished, it
+will write the best architecture for each VOLK function to
+$HOME/.volk/volk_config. This file is read when a function is called
+to determine the best version of the function to execute.
+
+\subsection volk_hand_tuning Hand-Tuning Performance
+
+If you know a particular architecture works best for your processor,
+you can specify the particular architecture to use in the VOLK
+preferences file: $HOME/.volk/volk_config
+
+The file looks like:
+
+\code
+ volk_<FUNCTION_NAME> <ARCHITECTURE>
+\endcode
+
+Here "FUNCTION_NAME" is the particular function whose default value
+you want to override, and "ARCHITECTURE" is the VOLK SIMD
+architecture to use (generic, sse, sse2, sse3, avx, etc.). For
+example, the following config file tells VOLK to use SSE3 for the
+aligned and unaligned versions of a function that multiplies two
+complex streams together.
+
+\code
+ volk_32fc_x2_multiply_32fc_a sse3
+ volk_32fc_x2_multiply_32fc_u sse3
+\endcode
+
+\b Tip: if benchmarking GNU Radio blocks, it can be useful to have a
+volk_config file that sets all architectures to 'generic' as a way to
+test the vectorized versus non-vectorized implementations.
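+
+For instance, such a benchmarking config file might contain lines like
+the following (reusing the kernel names from the example above):
+
+\code
+ volk_32fc_x2_multiply_32fc_a generic
+ volk_32fc_x2_multiply_32fc_u generic
+\endcode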
+
+*/
+
+
diff --git a/docs/doxygen/other/group_defs.dox b/docs/doxygen/other/group_defs.dox
new file mode 100644
index 000000000..4aee49ec8
--- /dev/null
+++ b/docs/doxygen/other/group_defs.dox
@@ -0,0 +1,68 @@
+/*!
+ * \defgroup block GNU Radio C++ Signal Processing Blocks
+ * \brief All C++ blocks that can be used in GR graphs are listed here or in
+ * the subcategories below.
+ *
+ * Sorry, at this time the Python hierarchical blocks are not included
+ * in this index.
+ * @{
+ */
+
+/*! \defgroup container_blk Top Block and Hierarchical Block Base Classes */
+/*! \defgroup source_blk Signal Sources */
+/*! \defgroup sink_blk Signal Sinks */
+/*! \defgroup filter_blk Filters */
+/*! \defgroup math_blk Mathematics */
+/*! \defgroup modulation_blk Signal Modulation */
+/*! \defgroup demodulation_blk Signal Demodulation */
+/*! \defgroup coding_blk Information Coding and Decoding*/
+/*! \defgroup sync_blk Synchronization */
+/*! \defgroup eq_blk Equalization */
+/*! \defgroup converter_blk Type Conversions */
+/*! \defgroup level_blk Signal Level Control (AGC) */
+/*! \defgroup dft_blk Fourier Transform */
+/*! \defgroup wavelet_blk Wavelet Transform */
+/*! \defgroup ofdm_blk OFDM Blocks */
+/*! \defgroup pager_blk Pager Blocks */
+/*! \defgroup misc_blk Miscellaneous Blocks */
+/*! \defgroup slicedice_blk Slicing and Dicing Streams */
+/*! \defgroup vocoder_blk Voice Encoders and Decoders */
+/*! \defgroup digital Digital Modulation Blocks */
+/*! \defgroup analog Analog Communications Blocks */
+/*! \defgroup qtgui_blk QT Graphical Interfaces */
+/*! \defgroup uhd_blk UHD Interface */
+/*! \defgroup audio_blk Audio Interface */
+/*! \defgroup pfb_blk Polyphase Filterbank */
+/*! \defgroup snr_blk SNR estimators */
+
+/*!
+ * \defgroup base_blk Base classes for GR Blocks
+ * \brief All C++ blocks are derived from these base classes
+ */
+
+
+/*! @} */
+
+/*! \defgroup filter_design Digital Filter Design */
+/*! \defgroup misc Miscellaneous */
+/*! \defgroup internal Implementation Details */
+
+/*!
+ * \defgroup applications Applications
+ * These are some applications built using GNU Radio.
+ * @{
+ */
+
+/*!
+ * \defgroup atsc ATSC
+ * ATSC Applications...
+ */
+
+/*!
+ * \defgroup pager Pager
+ * Pager Applications
+ */
+
+/*! @} */
+
+/*! \defgroup hardware Misc Hardware Control */
diff --git a/docs/doxygen/other/main_page.dox b/docs/doxygen/other/main_page.dox
new file mode 100644
index 000000000..e7d4685f7
--- /dev/null
+++ b/docs/doxygen/other/main_page.dox
@@ -0,0 +1,405 @@
+/*! \mainpage
+
+\image html gnuradio-logo.png
+
+Welcome to GNU Radio!
+
+For details about GNU Radio and using it, please see the <a
+href="http://gnuradio.org" target="_blank"><b>main project page</b></a>.
+
+Other information about the project and discussion about GNU Radio,
+software radio, and communication theory in general can be found at
+the <a href="http://gnuradio.squarespace.com" target="_blank"><b>GNU Radio blog</b></a>.
+
+
+\section build Building GNU Radio
+
+See the \ref build_guide page for details about the project's
+dependencies and build process.
+
+
+\section blocks GNU Radio Blocks
+
+GNU Radio uses discrete signal processing blocks that are connected
+together to perform your signal processing application. This manual
+contains a list of all GNU Radio <a href="modules.html"><b>C++ Blocks</b></a>.
+
+Please note that at this time, we haven't found an acceptable way to
+provide unified documentation for the C++ parts of the system and the
+parts written in Python (mostly hierarchical blocks). Until this gets
+worked out, please bear with us, or better yet, solve it for us!
+
+
+\section toc Manual Contents
+More details on packages in GNU Radio:
+\li \ref page_audio
+\li \ref page_digital
+\li \ref page_qtgui
+\li \ref page_uhd
+\li \ref page_vocoder
+
+More details on GNU Radio concepts:
+\li \ref page_pmt
+\li \ref page_msg_passing
+\li \ref page_metadata
+\li \ref volk_guide
+\li \ref page_pfb
+
+
+\section flowgraph Operating a Flowgraph
+
+The basic data structure in GNU Radio is the flowgraph, which
+represents the connections of the blocks through which a continuous
+stream of samples flows. The concept of a flowgraph is an acyclic
+directed graph with one or more source blocks (to insert samples
+into the flowgraph), one or more sink blocks (to terminate or export
+samples from the flowgraph), and any signal processing blocks in
+between.
+
+A program must at least create a GNU Radio 'top_block', which
+represents the top-most structure of the flowgraph. The top block
+provides the overall control and holds methods such as 'start,' 'stop,'
+and 'wait.'
+
+The general construction of a GNU Radio application is to create a
+gr_top_block, instantiate the blocks, connect the blocks together, and
+then start the gr_top_block. The following program shows how this is
+done. A single source and sink are used with a FIR filter between
+them.
+
+\code
+ from gnuradio import gr, filter
+
+ class my_topblock(gr.top_block):
+     def __init__(self):
+         gr.top_block.__init__(self)
+
+         amp = 1
+         taps = filter.firdes.low_pass(1, 1, 0.1, 0.01)
+
+         self.src = gr.noise_source_c(gr.GR_GAUSSIAN, amp)
+         self.flt = filter.fir_filter_ccf(1, taps)
+         self.snk = gr.null_sink(gr.sizeof_gr_complex)
+
+         self.connect(self.src, self.flt, self.snk)
+
+ if __name__ == "__main__":
+     tb = my_topblock()
+     tb.start()
+     tb.wait()
+\endcode
+
+The 'tb.start()' starts the data flowing through the flowgraph while
+the 'tb.wait()' is the equivalent of a thread's 'join' operation and
+blocks until the gr_top_block is done.
+
+As an alternative to the 'start' and 'wait' methods, a 'run' method is
+also provided for convenience. It is a blocking start call,
+equivalent to the above 'start' followed by a 'wait.'
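+
+For example, the flowgraph above could also be driven with the blocking
+call (a minimal sketch reusing the 'my_topblock' class defined above):
+
+\code
+ if __name__ == "__main__":
+     tb = my_topblock()
+     tb.run()
+\endcode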
+
+
+\subsection latency Latency and Throughput
+
+By default, GNU Radio runs a scheduler that attempts to optimize
+throughput. Using a dynamic scheduler, blocks in a flowgraph pass
+chunks of items from sources to sinks. The sizes of these chunks will
+vary depending on the speed of processing. For each block, the number
+of items it can process depends on how much space it has in its
+output buffer(s) and how many items are available on the input
+buffer(s).
+
+The consequence of this is that often a block may be called with a very
+large number of items to process (several thousand). In terms of
+speed, this is efficient since now the majority of the processing time
+is taken up with processing samples. Smaller chunks mean more calls
+into the scheduler to retrieve more data. The downside to this is that
+it can lead to large latency while a block is processing a large chunk
+of data.
+
+To combat this problem, the gr_top_block can be passed a limit on the
+number of output items a block will ever receive. A block may get fewer
+than this number, but never more, and so it serves as an upper limit
+to the latency any block will exhibit. By limiting the number of items
+per call to a block, though, we increase the overhead of the
+scheduler, and so reduce the overall efficiency of the application.
+
+To set the maximum number of output items, we pass a value into the
+'start' or 'run' method of the gr_top_block:
+
+\code
+ tb.start(1000)
+ tb.wait()
+or
+ tb.run(1000)
+\endcode
+
+Using this method, we place a global restriction on the size of items
+to all blocks. Each block, though, has the ability to override this
+with its own limit. Using the 'set_max_noutput_items(m)' method for an
+individual block will override the global setting. For example, in
+the following code, the global setting is 1000 items max, except for
+the FIR filter, which can receive up to 2000 items.
+
+\code
+ tb.flt.set_max_noutput_items(2000)
+ tb.run(1000)
+\endcode
+
+In some situations, you might actually want to restrict the size of
+the buffer itself. This can help to prevent a buffer that is blocked
+for data from simply increasing the number of items in its buffer, which
+will then cause an increased latency for new samples. You can set the
+size of an output buffer for each output port for every block.
+
+WARNING: This is an advanced feature in GNU Radio and should not be
+used without a full understanding of this concept as explained below.
+
+To set the output buffer size of a block, you simply call:
+
+\code
+ tb.blk0.set_max_output_buffer(2000)
+ tb.blk1.set_max_output_buffer(1, 2000)
+ tb.start()
+ print tb.blk1.max_output_buffer(0)
+ print tb.blk1.max_output_buffer(1)
+\endcode
+
+In the above example, all ports of blk0 are set to a buffer size of
+2000 in _items_ (not bytes), while blk1 only sets the size for output
+port 1; all other ports use the default. The third and fourth
+lines just print out the buffer sizes for ports 0 and 1 of blk1. This
+is done after start() is called because the values are updated based
+on what is actually allocated to the block's buffers.
+
+NOTES:
+
+1. Buffer length assignment is done once at runtime (i.e., when run()
+or start() is called). So to set the max buffer lengths, the
+set_max_output_buffer calls must be done before this.
+
+2. Once the flowgraph is started, the buffer lengths for a block are
+set and cannot be dynamically changed, even during a
+lock()/unlock(). If you need to change the buffer size, you will have
+to delete the block and rebuild it, and therefore must disconnect and
+reconnect the blocks.
+
+3. This can affect throughput. Large buffers are designed to improve
+the efficiency and speed of the program at the expense of
+latency. Limiting the size of the buffer may decrease performance.
+
+4. The real buffer size is actually based on a minimum granularity of
+the system. Typically, this is a page size, which is typically 4096
+bytes. This means that any buffer size that is specified with this
+command will get rounded up to the nearest granularity (e.g., page)
+size. When calling max_output_buffer(port) after the flowgraph is
+started, you will get how many items were actually allocated in the
+buffer, which may be different than what was initially specified.
+
+
+\section reconfigure Reconfiguring Flowgraphs
+
+It is possible to reconfigure the flowgraph at runtime. The
+reconfiguration is meant for changes in the flowgraph structure, not
+individual parameter settings of the blocks. For example, changing the
+constant in a gr_add_const_cc block can be done while the flowgraph is
+running using the 'set_k(k)' method.
+
+Reconfiguration is done by locking the flowgraph, which stops it from
+running and processing data, performing the reconfiguration, and then
+restarting the graph by unlocking it.
+
+The following example code shows a graph that first adds two
+gr_noise_source_c blocks and then replaces the gr_add_cc block with a
+gr_sub_cc block to then subtract the sources.
+
+\code
+from gnuradio import gr
+import time
+
+class mytb(gr.top_block):
+    def __init__(self):
+        gr.top_block.__init__(self)
+
+        self.src0 = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
+        self.src1 = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
+        self.add = gr.add_cc()
+        self.sub = gr.sub_cc()
+        self.head = gr.head(gr.sizeof_gr_complex, 1000000)
+        self.snk = gr.file_sink(gr.sizeof_gr_complex, "output.32fc")
+
+        self.connect(self.src0, (self.add,0))
+        self.connect(self.src1, (self.add,1))
+        self.connect(self.add, self.head)
+        self.connect(self.head, self.snk)
+
+def main():
+    tb = mytb()
+    tb.start()
+    time.sleep(0.01)
+
+    # Stop flowgraph and disconnect the add block
+    tb.lock()
+    tb.disconnect(tb.add, tb.head)
+    tb.disconnect(tb.src0, (tb.add,0))
+    tb.disconnect(tb.src1, (tb.add,1))
+
+    # Connect the sub block and restart
+    tb.connect(tb.sub, tb.head)
+    tb.connect(tb.src0, (tb.sub,0))
+    tb.connect(tb.src1, (tb.sub,1))
+    tb.unlock()
+
+    tb.wait()
+
+if __name__ == "__main__":
+    main()
+\endcode
+
+During reconfiguration, the maximum noutput_items value can be changed
+either globally using the 'set_max_noutput_items(m)' on the gr_top_block
+object or locally using the 'set_max_noutput_items(m)' on any given
+block object.
+
+A block also has an 'unset_max_noutput_items()' method that unsets the
+local max noutput_items value so that the block reverts back to using the
+global value.
+
+The following example expands the previous example but sets and resets
+the max noutput_items both locally and globally.
+
+\code
+from gnuradio import gr
+import time
+
+class mytb(gr.top_block):
+    def __init__(self):
+        gr.top_block.__init__(self)
+
+        self.src0 = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
+        self.src1 = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
+        self.add = gr.add_cc()
+        self.sub = gr.sub_cc()
+        self.head = gr.head(gr.sizeof_gr_complex, 1000000)
+        self.snk = gr.file_sink(gr.sizeof_gr_complex, "output.32fc")
+
+        self.connect(self.src0, (self.add,0))
+        self.connect(self.src1, (self.add,1))
+        self.connect(self.add, self.head)
+        self.connect(self.head, self.snk)
+
+def main():
+    # Start the gr_top_block after setting some max noutput_items.
+    tb = mytb()
+    tb.src1.set_max_noutput_items(2000)
+    tb.start(100)
+    time.sleep(0.01)
+
+    # Stop flowgraph and disconnect the add block
+    tb.lock()
+
+    tb.disconnect(tb.add, tb.head)
+    tb.disconnect(tb.src0, (tb.add,0))
+    tb.disconnect(tb.src1, (tb.add,1))
+
+    # Connect the sub block
+    tb.connect(tb.sub, tb.head)
+    tb.connect(tb.src0, (tb.sub,0))
+    tb.connect(tb.src1, (tb.sub,1))
+
+    # Set new max_noutput_items for the gr_top_block
+    # and unset the local value for src1
+    tb.set_max_noutput_items(1000)
+    tb.src1.unset_max_noutput_items()
+    tb.unlock()
+
+    tb.wait()
+
+if __name__ == "__main__":
+    main()
+\endcode
+
+
+\section volk_main Using Volk in GNU Radio
+
+The \ref volk_guide page provides an overview of how to incorporate
+and use Volk in GNU Radio blocks.
+
+Many blocks have already been converted to use Volk in their calls, so
+they can also serve as examples. See the gr_complex_to_xxx.h file for
+examples of various blocks that make use of Volk.
+
+
+\section prefs Configuration / Preference Files
+
+GNU Radio defines some of its basic behavior through a set of
+configuration files located in
+${prefix}/etc/gnuradio/conf.d. Different components have different
+files listed in here for the various properties. These will be read
+once when starting a GNU Radio application, so updates during runtime
+will not affect them.
+
+The configuration files use the following format:
+
+\code
+# Stuff from section 1
+[section1]
+var1 = value1
+var2 = value2 # value of 2
+
+# Stuff from section 2
+[section2]
+var3 = value3
+\endcode
+
+In this file, the hash mark ('#') indicates a comment and blank lines
+are ignored. Section labels are defined inside square brackets as a
+group distinguisher. All options must be associated with a section
+name. The options are listed one per line, with the option name
+given first, followed by an equals ('=') sign and then the value. Section
+and option names must not contain white space (actually, all white
+space is ignored).
+
+The value of an option can be a string or number and retrieved through
+a few different interfaces. There is a single preference object
+created when GNU Radio is launched. In Python, you can get this by
+making a new variable:
+
+\code
+p = gr.prefs()
+\endcode
+
+Similarly, in C++, we get a reference to the object by explicitly
+calling for the singleton of the object:
+
+\code
+ gr_prefs *p = gr_prefs::singleton();
+\endcode
+
+The methods associated with this preferences object are (from class gr_prefs):
+
+\code
+ bool has_section(string section)
+ bool has_option(string section, string option)
+ string get_string(string section, string option, string default_val)
+ bool get_bool(string section, string option, bool default_val)
+ long get_long(string section, string option, long default_val)
+ double get_double(string section, string option, double default_val)
+\endcode
+
+When setting a Boolean value, we can use 0, 1, "True", "true",
+"False", "false", "On", "on", "Off", and "off".
+
+All configuration preferences in these files can also be overridden by
+an environment variable. The environment variable is named based
+on the section and option name from the configuration file as:
+
+\code
+ GR_CONF_<SECTION>_<OPTION> = <value>
+\endcode
+
+The "GR_CONF_" is a prefix to identify this as a GNU Radio
+configuration variable and the section and option names are in
+uppercase. The value is the same format that would be used in the
+config file itself.
+
+*/
diff --git a/docs/doxygen/other/metadata.dox b/docs/doxygen/other/metadata.dox
new file mode 100644
index 000000000..b527b2100
--- /dev/null
+++ b/docs/doxygen/other/metadata.dox
@@ -0,0 +1,330 @@
+/*! \page page_metadata Metadata Information
+
+\section Introduction
+
+Metadata files have extra information in the form of headers that
+carry metadata about the samples in the file. Raw, binary files carry
+no extra information and must be handled delicately. Any changes in
+system state, such as the sample rate or a receiver's frequency, are
+not conveyed with the data in the file itself. Metadata headers
+solve this problem.
+
+We write metadata files using gr::blocks::file_meta_sink and read metadata
+files using gr::blocks::file_meta_source.
+
+Metadata files have headers that carry information about a segment of
+data within the file. The header structure is described in detail in
+the next section. A metadata file always starts with a header that
+describes the basic structure of the data. It contains information
+about the item size, data type, if it's complex, the sample rate of
+the segment, the time stamp of the first sample of the segment, and
+information regarding the header size and segment size.
+
+Headers have two main tags associated with them:
+
+- rx_rate: the sample rate of the stream.
+- rx_time: the time stamp of the first item in the segment.
+
+These tags were inspired by the UHD tag format.
+
+The header gives enough information to process and handle the
+data. One cautionary note, though, is that the data type should never
+change within a file. There should be very little need for this but,
+more importantly, GNU Radio blocks can only set the data type of their
+IO signatures in the constructor, so changes in the data type
+afterward will not be recognized.
+
+We also have an extra header segment that is optional. This can be
+loaded up at the beginning by the user specifying some extra metadata
+that should be transmitted along with the data. It also grows whenever
+a stream tag is seen, so the dictionary will contain any key:value
+pairs taken from tags in the flowgraph.
+
+
+\subsection types Types of Metadata Files
+
+GNU Radio currently supports two types of metadata files:
+
+- inline: headers are inline with the data in the same file.
+- detached: headers are in a separate header file from the data.
+
+The inline method is the standard version. When a detached header is
+used, the headers are simply inserted back-to-back in the detached
+header file. The data file, then, is the standard raw binary format
+with no interruptions in the data.
+
+
+\subsection updating Updating Headers
+
+While there is always a header that starts a metadata file, they are
+updated throughout as well. There are two events that trigger a new
+header. We define a segment as the unit of data associated with the
+last header.
+
+The first event that will trigger a new header is when enough samples
+have been written for the given segment. This number is defined as the
+maximum segment size and is a parameter we pass to the
+file_meta_sink. It defaults to 1 million items (items, not
+bytes). When that number of items is reached, a new header is
+generated and a new segment is started. This makes it easier for us to
+manipulate the data later and helps protect against catastrophic data
+loss.
+
+The second event to trigger a new segment is if a new tag is
+observed. If the tag is a standard tag in the header, the header value
+is updated, the header and current extras are written to file, and the
+segment begins again. If a tag from the extras is seen, the value
+associated with that tag is updated; and if a new tag is seen, a new
+key:value pair is added to the extras dictionary.
+
+When new tags are seen, we generate a new segment so that we make sure
+that all samples in that segment are defined by the header. If the
+sample rate changes, we create a new segment where all of the new
+samples are at that new rate. Also, in the case of UHD devices, if a
+segment loss is observed, it will generate a new timestamp as a tag of
+'rx_time'. We create a new file segment that reflects this change to
+keep the sample times exact.
+
+
+\subsection implementation Implementation
+
+Metadata files are created using gr::blocks::file_meta_sink. The
+default behavior is to create a single file with inline headers as
+metadata. An option can be set to switch to detached header mode.
+
+Metadata files are read into a flowgraph using
+gr::blocks::file_meta_source. This source reads a metadata file,
+inline by default with a settable option to use detached headers. The
+data from the segments is converted into a standard streaming
+output. The 'rx_rate' and 'rx_time' and all key:value pairs in the
+extra header are converted into tags and added to the stream tags
+interface.
+
+
+\section structure Structure
+
+The file metadata consists of a static mandatory header and a dynamic
+optional extras header. Each header is a separate PMT
+dictionary. Headers are created by building a PMT dictionary
+(pmt::pmt_make_dict) of key:value pairs, then the dictionary is
+serialized into a string to be written to file. The header is always
+the same length that is predetermined by the version of the header
+(this must be known already). The header will then indicate if there
+is an extra data to be extracted as a separate serialized dictionary.
+
+To work with the PMTs for creating and extracting header information,
+we use PMT operators. For example, we create a simplified version of
+the header in C++ like this:
+
+\code
+ using namespace pmt;
+ const char METADATA_VERSION = 0x0;
+ pmt_t header;
+ header = pmt_make_dict();
+ header = pmt_dict_add(header, mp("version"), mp(METADATA_VERSION));
+ header = pmt_dict_add(header, mp("rx_rate"), mp(samp_rate));
+ std::string hdr_str = pmt_serialize_str(header);
+\endcode
+
+The call to pmt::pmt_dict_add adds a new key:value pair to the
+dictionary. Notice that it both takes and returns the 'header'
+variable. This is because we are actually creating a new dictionary
+with this function, so we just assign it to the same variable.
+
+The 'mp' functions are convenience functions provided by the PMT
+library. They interpret the data type of the value being inserted and
+call the correct 'pmt_from_xxx' function. For more direct control over
+the data type, see PMT functions in pmt.h, such as
+pmt::pmt_from_uint64 or pmt::pmt_from_double.
+
+We finish this off by using pmt::pmt_serialize_str to convert the PMT
+dictionary into a specialized string format that makes it easy to
+write to a file.
+
+The header is always METADATA_HEADER_SIZE bytes long and a metadata
+file always starts with a header. So to extract the header from a
+file, we need to read in this many bytes from the beginning of the
+file and deserialize it. An important note about this is that the
+deserialize function must operate on a std::string. The serialized
+format of a dictionary contains null characters, so normal C character
+arrays (e.g., 'char *s') get confused.
+
+Assuming that 'std::string str' contains the full string as read from
+a file, we can access the dictionary in C++ like this:
+
+\code
+ pmt_t hdr = pmt_deserialize_str(str);
+ if(pmt_dict_has_key(hdr, pmt_string_to_symbol("strt"))) {
+   pmt_t r = pmt_dict_ref(hdr, pmt_string_to_symbol("strt"), PMT_NIL);
+   uint64_t seg_start = pmt_to_uint64(r);
+   uint64_t extra_len = seg_start - METADATA_HEADER_SIZE;
+ }
+\endcode
+
+This example first deserializes the string into a PMT dictionary
+again. This will throw an error if the string is malformed and cannot
+be deserialized correctly. We then want to get access to the item with
+key 'strt'. As the next subsection will show, this value indicates at
+which byte the data segment starts. We first check to make sure that
+this key exists in the dictionary. If not, our header does not contain
+the correct information and we might want to handle this as an error.
+
+Assuming the header is properly formatted, we then get the particular
+item referenced by the key 'strt'. This is a uint64_t, so we use the
+PMT function to extract and convert this value properly. We now know
+if we have an extra header in the file by looking at the difference
+between 'seg_start' and the static header size,
+METADATA_HEADER_SIZE. If the 'extra_len' is greater than 0, we know we
+have an extra header that we can process. Moreover, this also tells us
+the size of the serialized PMT dictionary in bytes, so we can easily
+read this many bytes from the file. We can then deserialize and parse
+this header just like the first.
+
+
+\subsection header Header Information
+
+The header is a PMT dictionary with a known structure. This structure
+may change, but we version the headers, so all headers of version X
+must be the same length and structure. As of now, we only have version
+0 headers, which look like the following:
+
+- version: (char) version number (usually set to METADATA_VERSION)
+- rx_rate: (double) Stream's sample rate
+- rx_time: (pmt::pmt_t pair - (uint64_t, double)) Time stamp (format from UHD)
+- size: (int) item size in bytes - reflects vector length if any.
+- type: (int) data type (enum below)
+- cplx: (bool) true if data is complex
+- strt: (uint64_t) start of data relative to current header
+- bytes: (uint64_t) size of following data segment in bytes
+
+The data types are indicated by an integer value from the following
+enumeration type:
+
+\code
+enum gr_file_types {
+ GR_FILE_BYTE=0,
+ GR_FILE_CHAR=0,
+ GR_FILE_SHORT=1,
+ GR_FILE_INT,
+ GR_FILE_LONG,
+ GR_FILE_LONG_LONG,
+ GR_FILE_FLOAT,
+ GR_FILE_DOUBLE,
+};
+\endcode
+
+\subsection extras Extras Information
+
+The extras section is an optional segment of the header. If 'strt' ==
+METADATA_HEADER_SIZE, then there is no extras section. Otherwise, it is simply
+a PMT dictionary of key:value pairs. The extras header can contain
+anything and can grow while a program is running.
+
+We can insert extra data into the header at the beginning if we
+wish. All we need to do is use the pmt::pmt_dict_add function to insert
+our hand-made metadata. This can be useful to add our own markers and
+information.
+
+The main role of the extras header, though, is as a container to hold
+any stream tags. When a stream tag is observed coming in, the tag's
+key and value are added to the dictionary. Like a standard dictionary,
+any time a key already exists, the value will be updated. If the key
+does not exist, a new entry is created and the new key:value pair is
+added. So any new tag that the file metadata sink sees will be
+added to the dictionary. It is therefore important to always check the
+'strt' value of the header to see if the length of the extras
+dictionary has changed at all.
+
+When reading out data from the extras, we do not necessarily know the
+data type of the PMT value. The key is always a PMT symbol, but the
+value can be any other PMT type. There are PMT functions that allow us
+to query the PMT to test if it is a particular type. We also have the
+ability to do pmt::pmt_print on any PMT object to print it to
+screen. Before converting from a PMT to its natural data type, it is
+necessary to know the data type.
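+
+For example, a value pulled out of the extras dictionary with
+pmt_dict_ref can be checked and converted in Python roughly like this
+(a sketch; 'val' is assumed to hold the PMT value):
+
+\code
+ from gruel import pmt
+
+ if pmt.pmt_is_symbol(val):
+     x = pmt.pmt_symbol_to_string(val)
+ elif pmt.pmt_is_real(val):
+     x = pmt.pmt_to_double(val)
+ elif pmt.pmt_is_integer(val):
+     x = pmt.pmt_to_long(val)
+ else:
+     pmt.pmt_print(val)
+\endcode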
+
+
+\section Utilities
+
+GNU Radio comes with a couple of utilities to help in debugging and
+manipulating metadata files. There is a general parser in Python that
+will convert the PMT header and extra header into Python
+dictionaries. This utility is:
+
+- gr-blocks/python/parse_file_metadata.py
+
+This program is installed into the Python directory under the
+'gnuradio' module, so it can be accessed with:
+
+\code
+from gnuradio.blocks import parse_file_metadata
+\endcode
+
+It defines HEADER_LENGTH as the static length of the metadata header
+size. It also has dictionaries that can be used to convert from the
+file type to a string (ftype_to_string) and one to convert from the
+file type to the size of the data type in bytes (ftype_to_size).
+
+The 'parse_header' takes in a PMT dictionary, parses it, and returns a
+Python dictionary. An optional 'VERBOSE' bool can be set to print the
+information to standard out.
+
+The 'parse_extra_dict' is similar in that it converts from a PMT
+dictionary to a Python dictionary. The values are kept in their PMT
+format since we do not necessarily know the native data type.
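+
+For example, the first header of an inline metadata file can be
+inspected roughly as follows (a sketch; the file name is only a
+placeholder):
+
+\code
+ from gruel import pmt
+ from gnuradio.blocks import parse_file_metadata
+
+ f = open("/tmp/metadat_file.out", "rb")
+ hdr_str = f.read(parse_file_metadata.HEADER_LENGTH)
+ header = pmt.pmt_deserialize_str(hdr_str)
+ info = parse_file_metadata.parse_header(header, True)
+\endcode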
+
+A program called 'gr_read_file_metadata' is installed into the path
+and can be used to read out all header information from a metadata
+file. This program is just called with the file name as the first
+command-line argument. An option '-D' will handle detached header
+files where the file of headers is expected to be the file name of the
+data with '.hdr' appended to it.
+
+
+\section Examples
+
+Examples are located in:
+
+- gr-blocks/examples/metadata
+
+Currently, there are a few GRC example programs.
+
+- file_metadata_sink: create a metadata file from UHD samples.
+- file_metadata_source: read the metadata file as input to a simple graph.
+- file_metadata_vector_sink: create a metadata file from UHD samples.
+- file_metadata_vector_source: read the metadata file as input to a simple graph.
+
+The file sink example can be switched to use a signal source instead
+of a UHD source, but no extra tagged data is used in this mode.
+
+The file source example pushes the data stream to a new raw file while
+a tag debugger block prints out any tags observed in the metadata
+file. A QT GUI time sink is used to look at the signal as well.
+
+The versions with 'vector' in the name are similar except they use
+vectors of data.
+
+The following shows a simple way of creating extra metadata for a
+metadata file. This example is just showing how we can insert a date
+into the metadata to keep track of later. The date in this case is
+encoded as a vector of uint16 with [day, month, year].
+
+\code
+    from gruel import pmt
+    from gnuradio import gr, blocks
+
+    # Encode the date as a vector of uint16: [day, month, year]
+    key = pmt.pmt_intern("date")
+    val = pmt.pmt_init_u16vector(3, [13,12,2012])
+
+    # Build the extras dictionary and serialize it for the file sink
+    extras = pmt.pmt_make_dict()
+    extras = pmt.pmt_dict_add(extras, key, val)
+    extras_str = pmt.pmt_serialize_str(extras)
+
+    self.sink = blocks.file_meta_sink(gr.sizeof_gr_complex,
+                                      "/tmp/metadata_file.out",
+                                      samp_rate, 1,
+                                      blocks.GR_FILE_FLOAT, True,
+                                      1000000, extras_str, False)
+
+\endcode
+
+*/
diff --git a/docs/doxygen/other/msg_passing.dox b/docs/doxygen/other/msg_passing.dox
new file mode 100644
index 000000000..aea0ac94a
--- /dev/null
+++ b/docs/doxygen/other/msg_passing.dox
@@ -0,0 +1,269 @@
+/*! \page page_msg_passing Message Passing
+
+\section intro Introduction
+
+GNU Radio was originally a streaming system with no other mechanism to
+pass data between blocks. Streams of data are a model that work well
+for samples, bits, etc., but are not really the right mechanism for
+control data, metadata, and, often, packet structures (at least at
+some point in the processing chain).
+
+We solved part of this problem a few years ago by introducing the tag
+stream, which runs in parallel with the data stream. The difference is
+that tags are designed to hold metadata and control information. Tags
+are associated with a particular sample in the data stream and flow
+downstream alongside the data. This model allows other blocks to
+identify that an event or action has occurred or should occur on a
+particular item. The major limitation is that the tag stream is only
+accessible inside a work function and only flows in one direction. Its
+benefit is that it stays synchronous with the data.
+
+We want a more general message passing system for a couple of
+reasons. The first is to allow blocks downstream to communicate back
+to blocks upstream. The second is to allow an easier way for us to
+communicate back and forth between external applications and GNU
+Radio. The new message passing interface handles these cases, although
+it does so on an asynchronous basis.
+
+The message passing interface heavily relies on Polymorphic Types
+(PMTs) in GNU Radio. For further information about these data
+structures, see the page \ref page_pmt.
+
+\section api Message Passing API
+
+The message passing interface is designed into the gr_basic_block,
+which is the parent class for all blocks in GNU Radio. Each block has
+a set of message queues to hold incoming messages and can post
+messages to the message queues of other blocks. The blocks also
+distinguish between input and output ports.
+
+A block has to declare its input and output message ports in its
+constructor. The message ports are described by a name, which is in
+practice a PMT symbol (<em>i.e.</em>, an interned string). The API calls
+to register a new port are:
+
+\code
+ void message_port_register_in(pmt::pmt_t port_id)
+ void message_port_register_out(pmt::pmt_t port_id)
+\endcode
+
+The ports are now identifiable by that port name. Other blocks that
+want to receive messages from a port must subscribe to it. When a
+block has a message to send, it publishes the message on one of its
+output ports. The subscribe and publish API looks like:
+
+\code
+ void message_port_pub(pmt::pmt_t port_id,
+ pmt::pmt_t msg);
+ void message_port_sub(pmt::pmt_t port_id,
+ pmt::pmt_t target);
+ void message_port_unsub(pmt::pmt_t port_id,
+ pmt::pmt_t target);
+\endcode
+
+Any block that has a subscription to another block's output message
+port will receive the message when it is published. Internally, when a
+block publishes a message, it simply iterates through all blocks that
+have subscribed and uses the gr_basic_block::_post method to send the
+message to that block's message queue.
+
+From the flowgraph level, we have instrumented a gr_hier_block2::msg_connect
+method to make it easy to subscribe blocks to other blocks'
+messages. The message connection method looks like the following
+code. Assume that the block \b src has an output message port named
+\a pdus and the block \b dbg has an input port named \a print.
+
+\code
+ self.tb.msg_connect(src, "pdus", dbg, "print")
+\endcode
+
+All messages published by the \b src block on port \a pdus will be
+received by \b dbg on port \a print. Note here how we are just using
+strings to define the ports, not PMT symbols. This is a convenience to
+the user to be able to more easily type in the port names (for
+reference, you can create a PMT symbol in Python using the
+pmt::pmt_intern function as pmt.pmt_intern("string")).
+
+Users can also query blocks for the names of their input and output
+ports using the following API calls:
+
+\code
+ pmt::pmt_t message_ports_in();
+ pmt::pmt_t message_ports_out();
+\endcode
+
+The return value for these are a PMT vector filled with PMT symbols,
+so PMT operators must be used to manipulate them.
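+
+As a small sketch (assuming the block instance \b blk exposes these
+methods to Python and that \b pmt has been imported from gruel), the
+input port names can be listed like this:
+
+\code
+ports = blk.message_ports_in()     # a PMT vector of PMT symbols
+for i in range(pmt.pmt_length(ports)):
+    print pmt.pmt_symbol_to_string(pmt.pmt_vector_ref(ports, i))
+\endcode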
+
+Each block has internal methods to handle posting and receiving of
+messages. The gr_basic_block::_post method takes in a message and
+places it into the block's queue. The publishing model uses the
+gr_basic_block::_post method of the subscribed blocks as the way to
+access their message queues, so the queue with the right name receives
+the new message. Posting a message also has the benefit of waking up
+the block's thread if it is in a wait state. So if a block is idle, as
+soon as a message is posted, it will wake up and call the message
+handler.
+
+The other side of the action in a block is in the message
+handler. When a block has an input message port, it needs a callback
+function to handle messages received on that port. We use a Boost bind
+operator to bind the message port to the message handling
+function. When a new message is pushed onto a port's message queue,
+it is this function that is used to process the message.
+
+
+\section examples Code Examples
+
+The following are snippets of code from blocks currently in GNU Radio
+that take advantage of message passing. We will be using
+gr_message_debug and gr_tagged_stream_to_pdu below to show setting up
+both input and output message passing capabilities.
+
+The gr_message_debug block is used for debugging the message passing
+system. It describes two input message ports: \a print and \a
+store. The \a print port simply prints out all messages to standard
+out while the \a store port keeps a list of all messages posted to
+it. This latter port works in conjunction with a
+gr_message_debug::get_message(int i) call that allows us to retrieve
+message \p i afterward.
+
+The constructor of this block looks like this:
+
+\code
+{
+ message_port_register_in(pmt::mp("print"));
+ set_msg_handler(pmt::mp("print"),
+ boost::bind(&gr_message_debug::print, this, _1));
+
+ message_port_register_in(pmt::mp("store"));
+ set_msg_handler(pmt::mp("store"),
+ boost::bind(&gr_message_debug::store, this, _1));
+}
+\endcode
+
+So the two ports are registered by their respective names. We then use
+the gr_basic_block::set_msg_handler function to identify this
+particular port name with a callback function. The Boost \a bind
+function (<a target="_blank"
+href="http://www.boost.org/doc/libs/1_52_0/libs/bind/bind.html">Boost::bind</a>)
+here binds the callback to a function of this block's class. So now
+the block's gr_message_debug::print and gr_message_debug::store
+functions are assigned to handle messages passed to them. Below is the
+\a print function for reference.
+
+\code
+void
+gr_message_debug::print(pmt::pmt_t msg)
+{
+ std::cout << "***** MESSAGE DEBUG PRINT ********\n";
+ pmt::pmt_print(msg);
+ std::cout << "**********************************\n";
+}
+\endcode
+
+The function simply takes in the PMT message and prints it. The method
+pmt::pmt_print is a function in the PMT library to print the
+PMT in a friendly, (mostly) pretty manner.
+
+The gr_tagged_stream_to_pdu block only defines a single
+output message port. In this case, its constructor looks like:
+
+\code
+{
+ message_port_register_out(pdu_port_id);
+}
+\endcode
+
+So we are only creating a single output port where \a pdu_port_id
+is defined in the file gr_pdu.h as \a pdus.
+
+This block's purpose is to take in a stream of samples along with
+stream tags and construct a predefined PDU message from this. In GNU
+Radio, we define a PDU as a PMT pair of (metadata, data). The metadata
+describes the samples found in the data portion of the
+pair. Specifically, the metadata can contain the length of the data
+segment and any other information (sample rate, etc.). The PMT vectors
+know their own length, so the length value is not actually necessary
+unless useful for purposes down the line. The metadata is a PMT
+dictionary while the data segment is a PMT uniform vector of either
+bytes, floats, or complex values.
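+
+For illustration, a PDU with a small metadata dictionary and an 8-byte
+payload could be built by hand in Python roughly like this (the
+"length" key is only an example):
+
+\code
+from gruel import pmt
+
+meta = pmt.pmt_make_dict()
+meta = pmt.pmt_dict_add(meta, pmt.pmt_intern("length"),
+                        pmt.pmt_from_long(8))
+data = pmt.pmt_make_u8vector(8, 0x55)   # 8 bytes, each set to 0x55
+pdu = pmt.pmt_cons(meta, data)          # PDU = (metadata, data) pair
+\endcode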
+
+In the end, when a PDU message is ready, the block calls its
+gr_tagged_stream_to_pdu::send_message function that is shown below.
+
+\code
+void
+gr_tagged_stream_to_pdu::send_message()
+{
+ if(pmt::pmt_length(d_pdu_vector) != d_pdu_length) {
+ throw std::runtime_error("msg length not correct");
+ }
+
+ pmt::pmt_t msg = pmt::pmt_cons(d_pdu_meta,
+ d_pdu_vector);
+ message_port_pub(pdu_port_id, msg);
+
+ d_pdu_meta = pmt::PMT_NIL;
+ d_pdu_vector = pmt::PMT_NIL;
+ d_pdu_length = 0;
+ d_pdu_remain = 0;
+ d_inpdu = false;
+}
+\endcode
+
+This function does a bit of checking to make sure the PDU is valid, as
+well as some cleanup at the end. But it is the line where the message
+is published that is important to this discussion. Here, the block
+posts the PDU message to any subscribers by calling the
+gr_basic_block::message_port_pub publishing method.
+
+There is similarly a gr_pdu_to_tagged_stream block that essentially
+does the opposite. It acts as a source to a flowgraph and waits for
+PDU messages to be posted to it on its input port \a pdus. It extracts
+the metadata and data and processes them. The metadata dictionary is
+split up into key:value pairs and stream tags are created out of
+them. The data is then converted into an output stream of items and
+passed along. The next section describes how PDUs can be passed into a
+flowgraph using the gr_pdu_to_tagged_stream block.
+
+\section posting Posting from External Sources
+
+The last feature of the message passing architecture to discuss here
+is how it can be used to take in messages from an external source. We
+can call a block's gr_basic_block::_post method directly and pass it a
+message. So any block with an input message port can receive messages
+from the outside in this way.
+
+The following example uses a gr_pdu_to_tagged_stream block
+as the source block to a flowgraph. Its purpose is to wait for
+messages as PDUs posted to it and convert them to a normal stream. The
+payload will be sent on as a normal stream while the metadata will be
+decoded into tags and sent along on the tagged stream.
+
+So if we have created a \b src block as a PDU to stream, it has a \a
+pdus input port, which is how we will inject PDU messages to the
+flowgraph. These PDUs could come from another block or flowgraph, but
+here, we will create and insert them by hand.
+
+\code
+ port = pmt.pmt_intern("pdus")
+ msg = pmt.pmt_cons(pmt.PMT_NIL,
+ pmt.pmt_make_u8vector(16, 0xFF))
+ src.to_basic_block()._post(port, msg)
+\endcode
+
+The PDU's metadata section is empty, hence the pmt::PMT_NIL
+object. The payload is just a simple vector of 16 bytes, each set to
+0xFF. To post the message, we have to access the block's gr_basic_block
+class, which we do using the gr_basic_block::to_basic_block method and
+then call the gr_basic_block::_post method to pass the PDU to the
+right port.
+
+All of these mechanisms are explored and tested in the QA code of the
+file qa_pdu.py.
+
+There are some examples of using the message passing infrastructure
+through GRC in gnuradio-core/src/examples/msg_passing.
+
+*/
diff --git a/docs/doxygen/other/pfb_intro.dox b/docs/doxygen/other/pfb_intro.dox
new file mode 100644
index 000000000..01d08b0fa
--- /dev/null
+++ b/docs/doxygen/other/pfb_intro.dox
@@ -0,0 +1,96 @@
+/*! \page page_pfb Polyphase Filterbanks
+
+\section Introduction
+
+Polyphase filterbanks (PFB) are a very powerful set of filtering tools
+that can efficiently perform many multi-rate signal processing
+tasks. GNU Radio has a set of polyphase filterbank blocks to be used
+in all sorts of applications. These blocks and their documentation can
+be found in \ref pfb_blk.
+
+\section Usage
+
+See the documentation for the individual blocks for details about what
+they can do and how they should be used. Furthermore, there are
+examples for these blocks in <b>gnuradio-examples/python/pfb</b>.
+
+The main issue when using the PFB blocks is defining the prototype
+filter, which is passed to all of the blocks as a vector of \p
+taps. The taps from the prototype filter get partitioned among
+the \p N channels of the channelizer.
+
+An example of creating a set of filter taps for a PFB channelizer is
+found on line 49 of <b>gnuradio-examples/python/pfb/channelizer.py</b>
+and reproduced below. Notice that the sample rate is the sample rate
+at the input to the channelizer while the bandwidth and transition
+width are defined for the channel bandwidths. This makes a fairly long
+filter that is then split up between the \p N channels of the PFB.
+
+\code
+ self._fs = 9000 # input sample rate
+ self._M = 9 # Number of channels to channelize
+
+ self._taps = gr.firdes.low_pass_2(1, self._fs, 475.50, 50,
+ attenuation_dB=100,
+ window=gr.firdes.WIN_BLACKMAN_hARRIS)
+\endcode
+
+In this example, the signal into the channelizer is sampled at 9 ksps
+(complex, so 9 kHz of bandwidth). The filter uses 9 channels, so each
+output channel will have a bandwidth and sample rate of 1 kHz. We want
+to pass most of the channel, so we define the channel bandwidth to be
+a low pass filter with a bandwidth of 475.5 Hz and a transition
+bandwidth of 50 Hz, but we have defined this using a sample rate of
+the original 9 kHz. The prototype filter has 819 taps to be divided up
+between the 9 channels, so each channel uses 91 taps. This is probably
+overkill for a channelizer, and we could reduce the number of taps
+per channel to a couple of dozen with no ill effects.
+
+The basic rule when defining a set of taps for a PFB block is to think
+about the filter running at the highest rate it will see while the
+bandwidth is defined for the size of the channels. In the channelizer
+case, the highest rate is defined as the rate of the incoming signal,
+but in other PFB blocks, this is not so obvious.
+
+Two very useful blocks to use are the arbitrary resampler and the
+clock synchronizer (for PAM signals). These PFBs are defined with a
+set number of filters based on the fidelity required from them, not
+the rate changes. By default, the \p filter_size is set to 32 for
+these blocks, which is a reasonable default for most tasks. Because
+the PFB uses this number of filters in the filterbank, the maximum
+rate of the bank is defined from this (see the theory of a polyphase
+interpolator for a justification of this). So the prototype filter is
+defined to use a sample rate of \p filter_size times the signal's
+sampling rate.
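+
+As a rough sketch (the numbers and bandwidth choices here are purely
+illustrative), a prototype filter for the arbitrary resampler with the
+default 32 filters might be built as:
+
+\code
+    fs = 32000.0       # incoming sample rate
+    filter_size = 32   # number of filters in the bank (the default)
+
+    # Design the filter at filter_size times the input rate, with a gain
+    # of filter_size to compensate for the filterbank structure.
+    taps = gr.firdes.low_pass_2(filter_size, filter_size*fs,
+                                0.4*fs, 0.1*fs,
+                                attenuation_dB=80,
+                                window=gr.firdes.WIN_BLACKMAN_hARRIS)
+\endcode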
+
+A helpful wrapper for the arbitrary resampler is found in
+<b>gnuradio-core/src/python/gnuradio/blks2impl/pfb_arb_resampler.py</b>,
+which is exposed in Python as <b>blks2.pfb_arb_resampler_ccf</b> and
+<b>blks2.pfb_arb_resampler_fff</b>. This block is set up so that the
+user only needs to pass it the real number \p rate as the resampling
+rate. With just this information, this hierarchical block
+automatically creates a filter that fully passes the signal bandwidth
+being resampled but does not pass any out-of-band noise. See the code
+for this block for details of how the filter is constructed.
+
+Of course, a user can create his or her own taps and use them in the
+arbitrary resampler for more specific requirements. Some of the UHD
+examples (<b>gr-uhd/examples</b>) use this ability to create a
+receive matched filter or channel filter that also resamples the
+signal.
+
+\section Examples
+
+The following is an example of using the channelizer. It creates
+the appropriate filter to channelize 9 channels out of an original
+signal that is 9000 Hz wide, so each output channel is now 1000
+Hz. The code then plots the PSD of the original signal to show the
+signals in the original spectrum and then makes one plot for each of
+the 9 channels.
+
+NOTE: you need the Scipy and Matplotlib Python modules installed to
+run this example.
+
+\include gnuradio-core/src/examples/pfb/channelize.py
+
+*/
diff --git a/docs/doxygen/other/pmt.dox b/docs/doxygen/other/pmt.dox
new file mode 100644
index 000000000..61b73bca1
--- /dev/null
+++ b/docs/doxygen/other/pmt.dox
@@ -0,0 +1,348 @@
+/*! \page page_pmt Polymorphic Types
+
+\section intro Introduction
+
+Polymorphic Types are opaque data types that are designed as generic
+containers of data that can be safely passed around between blocks and
+threads in GNU Radio. They are heavily used in the stream tags and
+message passing interfaces. The most complete list of PMT functions is,
+of course, the source code, specifically the header file pmt.h. This
+manual page summarizes the most important features and points of PMTs.
+
+
+\section datatype PMT Data Type
+
+All PMTs are of the type pmt::pmt_t. This is an opaque container and
+PMT functions must be used to manipulate and even do things like
+compare PMTs. PMTs are also \a immutable (except PMT vectors). We
+never change the data in a PMT; instead, we create a new PMT with the
+new data. The main reason for this is thread safety. We can pass PMTs
+as tags and messages between blocks and each receives its own copy
+that we can read from. However, we can never write to this object, and
+so if multiple blocks have a reference to the same PMT, there is no
+possibility of thread-safety issues of one reading the PMT data while
+another is writing the data. If a block is trying to write new data to
+a PMT, it actually creates a new PMT to put the data into. Thus we
+allow easy access to data in the PMT format without worrying about
+mutex locking and unlocking while manipulating them.
+
+PMTs can represent the following:
+
+- Boolean values of true/false
+- Strings (as symbols)
+- Integers (long and uint64)
+- Floats (as doubles)
+- Complex (as two doubles)
+- Pairs
+- Tuples
+- Vectors (of PMTs)
+- Uniform vectors (of any standard data type)
+- Dictionaries (list of key:value pairs)
+- Any (contains a boost::any pointer to hold anything)
+
+The PMT library also defines a set of functions that operate directly
+on PMTs such as:
+
+- Equal/equivalence between PMTs
+- Length (of a tuple or vector)
+- Map (apply a function to all elements in the PMT)
+- Reverse
+- Get a PMT at a position in a list
+- Serialize and deserialize
+- Printing
+
+The constants in the PMT library are:
+
+- pmt::PMT_T - a PMT True
+- pmt::PMT_F - a PMT False
+- pmt::PMT_NIL - an empty PMT (think Python's 'None')
+
+\section insert Inserting and Extracting Data
+
+Use pmt.h for a complete guide to the list of functions used to create
+PMTs and get the data from a PMT. When using these functions, remember
+that while PMTs are opaque and designed to hold any data, the data
+underneath is still a C++ typed object, and so the right type of
+set/get function must be used for the data type.
+
+Typically, a PMT object can be made from a scalar item using a call
+like "pmt::pmt_from_<type>". Similarly, when getting data out of a
+PMT, we use a call like "pmt::pmt_to_<type>". For example:
+
+\code
+double a = 1.2345;
+pmt::pmt_t pmt_a = pmt::pmt_from_double(a);
+double b = pmt::pmt_to_double(pmt_a);
+
+int c = 12345;
+pmt::pmt_t pmt_c = pmt::pmt_from_long(c);
+int d = pmt::pmt_to_long(pmt_c);
+\endcode
+
+As a side-note, making a PMT from a complex number is not obvious:
+
+\code
+std::complex<double> a(1.2, 3.4);
+pmt::pmt_t pmt_a = pmt::pmt_make_rectangular(a.real(), a.imag());
+std::complex<double> b = pmt::pmt_to_complex(pmt_a);
+\endcode
+
+Pairs, dictionaries, and vectors have different constructors and ways
+to manipulate them, and these are explained in their own sections.
+
+
+\section strings Strings
+
+PMTs have a way of representing short strings. These strings are
+actually stored as interned symbols in a hash table; in other
+words, only one PMT object for a given string exists. When creating a
+new symbol from a string that already exists in the hash table, the
+constructor will return a reference to the existing PMT.
+
+We create strings with the following functions, where the second
+function, pmt::pmt_intern, is simply an alias of the first.
+
+\code
+pmt::pmt_t str0 = pmt::pmt_string_to_symbol(std::string("some string"));
+pmt::pmt_t str1 = pmt::pmt_intern(std::string("some string"));
+\endcode
+
+The string can be retrieved using the inverse function:
+
+\code
+std::string s = pmt::pmt_symbol_to_string(str0);
+\endcode
+
+
+\section tests Tests and Comparisons
+
+The PMT library comes with a number of functions to test and compare
+PMT objects. In general, for any PMT data type, there is an equivalent
+"pmt::pmt_is_<type>". We can use these to test the PMT before trying
+to access the data inside. Expanding our examples above, we have:
+
+\code
+pmt::pmt_t str0 = pmt::pmt_string_to_symbol(std::string("some string"));
+if(pmt::pmt_is_symbol(str0))
+ std::string s = pmt::pmt_symbol_to_string(str0);
+
+double a = 1.2345;
+pmt::pmt_t pmt_a = pmt::pmt_from_double(a);
+if(pmt::pmt_is_double(pmt_a))
+ double b = pmt::pmt_to_double(pmt_a);
+
+int c = 12345;
+pmt::pmt_t pmt_c = pmt::pmt_from_long(c);
+if(pmt::pmt_is_long(pmt_c))
+    int d = pmt::pmt_to_long(pmt_c);
+
+// This test will fail, which protects us from trying to coerce pmt_c
+// to a double when internally it is a long, which would throw an
+// exception.
+if(pmt::pmt_is_double(pmt_c))
+    double d = pmt::pmt_to_double(pmt_c);
+
+\endcode
+
+
+\section dict Dictionaries
+
+PMT dictionaries are lists of key:value pairs. They have a
+well-defined interface for creating, adding, removing, and accessing
+items in the dictionary. Note that every operation that changes the
+dictionary both takes a PMT dictionary as an argument and returns a
+PMT dictionary. The dictionary used as an input is not changed and the
+returned dictionary is a new PMT with the changes made there.
+
+The following is a list of PMT dictionary functions. Click through to
+get more information on what each does.
+
+- bool pmt::pmt_is_dict(const pmt_t &obj)
+- pmt_t pmt::pmt_make_dict()
+- pmt_t pmt::pmt_dict_add(const pmt_t &dict, const pmt_t &key, const pmt_t &value)
+- pmt_t pmt::pmt_dict_delete(const pmt_t &dict, const pmt_t &key)
+- bool pmt::pmt_dict_has_key(const pmt_t &dict, const pmt_t &key)
+- pmt_t pmt::pmt_dict_ref(const pmt_t &dict, const pmt_t &key, const pmt_t &not_found)
+- pmt_t pmt::pmt_dict_items(pmt_t dict)
+- pmt_t pmt::pmt_dict_keys(pmt_t dict)
+- pmt_t pmt::pmt_dict_values(pmt_t dict)
+
+This example does some basic manipulations of PMT dictionaries in
+Python. Notice that we pass the dictionary \a a and return the results
+to \a a. This still creates a new dictionary and removes the local
+reference to the old dictionary. This just keeps our number of
+variables small.
+
+\code
+from gruel import pmt
+
+key0 = pmt.pmt_intern("int")
+val0 = pmt.pmt_from_long(123)
+val1 = pmt.pmt_from_long(234)
+
+key1 = pmt.pmt_intern("double")
+val2 = pmt.pmt_from_double(5.4321)
+
+# Make an empty dictionary
+a = pmt.pmt_make_dict()
+
+# Add a key:value pair to the dictionary
+a = pmt.pmt_dict_add(a, key0, val0)
+pmt.pmt_print(a)
+
+# Add a new value to the same key;
+# new dict will still have one item with new value
+a = pmt.pmt_dict_add(a, key0, val1)
+pmt.pmt_print(a)
+
+# Add a new key:value pair
+a = pmt.pmt_dict_add(a, key1, val2)
+pmt.pmt_print(a)
+
+# Test if we have a key, then delete it
+print pmt.pmt_dict_has_key(a, key1)
+a = pmt.pmt_dict_delete(a, key1)
+print pmt.pmt_dict_has_key(a, key1)
+
+ref = pmt.pmt_dict_ref(a, key0, pmt.PMT_NIL)
+pmt.pmt_print(ref)
+
+# The following should never print
+if(pmt.pmt_dict_has_key(a, key0) and pmt.pmt_eq(ref, pmt.PMT_NIL)):
+ print "Trouble! We have key0, but it returned PMT_NIL"
+\endcode
+
+\section vectors Vectors
+
+PMT vectors come in two forms: vectors of PMTs and vectors of uniform
+data. The standard PMT vector is a vector of PMTs, and each PMT can be
+of any internal type. On the other hand, uniform vectors hold items of
+a specific data type, which come in the following forms:
+
+- (u)int8
+- (u)int16
+- (u)int32
+- (u)int64
+- float32
+- float64
+- complex 32 (std::complex<float>)
+- complex 64 (std::complex<double>)
+
+That is, the standard sizes of integers, floats, and complex types of
+both signed and unsigned.
+
+Vectors have a well-defined interface that allows us to make, set,
+get, and fill them. We can also get the length of a vector with
+pmt::pmt_length.
+
+For standard vectors, these functions look like:
+
+- bool pmt::pmt_is_vector(pmt_t x)
+- pmt_t pmt::pmt_make_vector(size_t k, pmt_t fill)
+- pmt_t pmt::pmt_vector_ref(pmt_t vector, size_t k)
+- void pmt::pmt_vector_set(pmt_t vector, size_t k, pmt_t obj)
+- void pmt::pmt_vector_fill(pmt_t vector, pmt_t fill)
+
+Uniform vectors have the same types of functions, but they are data
+type-dependent. The following list tries to explain them where you
+substitute the specific data type prefix for \a dtype (prefixes being:
+u8, u16, u32, u64, s8, s16, s32, s64, f32, f64, c32, c64); a short
+example follows the list.
+
+- bool pmt::pmt_is_(dtype)vector(pmt_t x)
+- pmt_t pmt::pmt_make_(dtype)vector(size_t k, (dtype) fill)
+- pmt_t pmt::pmt_init_(dtype)vector(size_t k, const (dtype*) data)
+- pmt_t pmt::pmt_init_(dtype)vector(size_t k, const std::vector<dtype> data)
+- pmt_t pmt::pmt_(dtype)vector_ref(pmt_t vector, size_t k)
+- void pmt::pmt_(dtype)vector_set(pmt_t vector, size_t k, (dtype) x)
+- const dtype* pmt::pmt_(dtype)vector_elements(pmt_t vector, size_t &len)
+- dtype* pmt::pmt_(dtype)vector_writable_elements(pmt_t vector, size_t &len)
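+
+Here is the short example promised above: a Python sketch of creating
+and manipulating a uniform vector of unsigned bytes.
+
+\code
+from gruel import pmt
+
+v = pmt.pmt_make_u8vector(4, 0)    # 4 elements, each initialized to 0
+pmt.pmt_u8vector_set(v, 0, 0xFF)   # set element 0 (mutates the vector!)
+print pmt.pmt_u8vector_ref(v, 0)   # -> 255
+print pmt.pmt_length(v)            # -> 4
+\endcode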
+
+\b Note: We break the immutability contract with vectors. The 'set'
+functions actually change the data underneath. It is important to keep
+track of the implications of setting a new value as well as accessing
+the 'vector_writable_elements' data. Since these are mostly standard data
+types, sets and gets are atomic, so it is unlikely to cause a great
+deal of harm. But it's only unlikely, not impossible. Best to use
+mutexes whenever manipulating data in a vector.
+
+
+\subsection blob BLOB
+
+A BLOB is a 'binary large object' type. In PMTs, this is actually
+just a thin wrapper around a u8vector.
+
+\section pairs Pairs
+
+Pairs are inspired by LISP 'cons' data types, so you will find the
+language here comes from LISP. A pair is just a pair of PMT
+objects. They are manipulated using the following functions (a brief
+example follows the list):
+
+- bool pmt::pmt_is_pair (const pmt_t &obj): Return true if obj is a pair, else false
+- pmt_t pmt::pmt_cons(const pmt_t &x, const pmt_t &y): construct new pair
+- pmt_t pmt::pmt_car(const pmt_t &pair): get the car of the pair (first object)
+- pmt_t pmt::pmt_cdr(const pmt_t &pair): get the cdr of the pair (second object)
+- void pmt::pmt_set_car(pmt_t pair, pmt_t value): Stores value in the car field
+- void pmt::pmt_set_cdr(pmt_t pair, pmt_t value): Stores value in the cdr field
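+
+A quick Python sketch of working with pairs:
+
+\code
+from gruel import pmt
+
+p = pmt.pmt_cons(pmt.pmt_intern("key"), pmt.pmt_from_long(42))
+print pmt.pmt_is_pair(p)          # -> True
+pmt.pmt_print(pmt.pmt_car(p))     # the first object: "key"
+pmt.pmt_print(pmt.pmt_cdr(p))     # the second object: 42
+\endcode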
+
+
+\section serdes Serializing and Deserializing
+
+It is often important to hide the fact that we are working with PMTs
+to make them easier to transmit, store, write to file, etc. The PMT
+library has methods to serialize data into a string buffer or a
+string and then methods to deserialize the string buffer or string
+back into a PMT. We use this extensively in the metadata files (see
+\ref page_metadata).
+
+- bool pmt::pmt_serialize(pmt_t obj, std::streambuf &sink)
+- std::string pmt::pmt_serialize_str(pmt_t obj)
+- pmt_t pmt::pmt_deserialize(std::streambuf &source)
+- pmt_t pmt::pmt_deserialize_str(std::string str)
+
+For example, we will serialize the data above to make it into a string
+ready to be written to a file and then deserialize it back to its
+original PMT.
+
+\code
+from gruel import pmt
+
+key0 = pmt.pmt_intern("int")
+val0 = pmt.pmt_from_long(123)
+
+key1 = pmt.pmt_intern("double")
+val1 = pmt.pmt_from_double(5.4321)
+
+# Make an empty dictionary
+a = pmt.pmt_make_dict()
+
+# Add a key:value pair to the dictionary
+a = pmt.pmt_dict_add(a, key0, val0)
+a = pmt.pmt_dict_add(a, key1, val1)
+
+pmt.pmt_print(a)
+
+ser_str = pmt.pmt_serialize_str(a)
+print ser_str
+
+b = pmt.pmt_deserialize_str(ser_str)
+pmt.pmt_print(b)
+
+\endcode
+
+The line where we 'print ser_str' will print the serialized string;
+parts of it will be readable, but the point of serializing is not to
+make a human-readable string. This is only done here as a test.
+
+
+\section printing Printing
+
+We have used the pmt::pmt_print function in these examples to nicely
+print the contents of a PMT. Another way to print the contents is
+using the overloaded "<<" operator with a stream buffer object. In
+C++, we can inline print the contents of a PMT like:
+
+\code
+pmt::pmt_t a = pmt::pmt_from_double(1.0);
+std::cout << "The PMT a contains " << a << std::endl;
+\endcode
+
+*/
diff --git a/docs/doxygen/other/shared_ptr_docstub.h b/docs/doxygen/other/shared_ptr_docstub.h
new file mode 100755
index 000000000..428dd03f1
--- /dev/null
+++ b/docs/doxygen/other/shared_ptr_docstub.h
@@ -0,0 +1,24 @@
+namespace boost
+{
+ /*!
+ * \brief shared_ptr documentation stub
+ *
+ * \warning
+ * This isn't the real shared_ptr template. It's just enough to get doxygen
+ * to draw pretty collaboration diagrams.
+ *
+ * An enhanced relative of scoped_ptr with reference counted copy semantics.
+ * The object pointed to is deleted when the last shared_ptr pointing to it
+ * is destroyed or reset.
+ */
+
+template<class T> class shared_ptr
+{
+public:
+
+ T * px; // contained pointer
+
+}; // shared_ptr
+
+
+} // namespace boost
diff --git a/docs/doxygen/other/thread_affinity.dox b/docs/doxygen/other/thread_affinity.dox
new file mode 100644
index 000000000..235266feb
--- /dev/null
+++ b/docs/doxygen/other/thread_affinity.dox
@@ -0,0 +1,65 @@
+/*! \page page_affinity Block Thread Affinity
+
+\section intro Introduction
+
+In the thread-per-block scheduler, you can set a block's core
+affinity. Each block can be pinned to a group of cores or be set back
+to use the standard kernel scheduling.
+
+The implementation is done by adding new functions to the GRUEL
+library:
+
+\code
+ gr_thread_t get_current_thread_id();
+ void thread_bind_to_processor(unsigned int n);
+ void thread_bind_to_processor(const std::vector<unsigned int> &mask);
+ void thread_bind_to_processor(gr_thread_t thread, unsigned int n);
+ void thread_bind_to_processor(gr_thread_t thread, const std::vector<unsigned int> &mask);
+ void thread_unbind();
+ void thread_unbind(gr_thread_t thread);
+\endcode
+
+The ability to set a thread's affinity to a core or groups of cores is
+not implemented in the Boost thread library, and so we have made our
+own portability library. In particular, the gruel::gr_thread_t type is
+defined as the thread type for the given system. The other functions
+are designed to be portable as well by calling the specific
+implementation for the thread affinity for a particular platform.
+
+There are functions to set a thread to a group of cores. If the thread
+is not given, the current thread is used. If a single number is
+passed, only that core is set (this is equivalent to a core mask with
+just a single value).
+
+Similarly, there are functions to unset the affinity. This practically
+implements the setting of the thread's affinity to all possible
+cores. Again, the function that does not take a thread argument unsets
+the affinity for the current thread.
+
+
+\section affinity_api GNU Radio Block API
+
+Each block has two new data members:
+
+- threaded: a boolean value that is true if the block is attached to a
+ thread.
+- thread: a gruel::gr_thread_t handle to the block's thread.
+
+A block can set and unset its affinity at any time using the
+following member functions:
+
+- gr_block::set_processor_affinity(const std::vector<unsigned int> &mask)
+- gr_block::unset_processor_affinity()
+
+Where \p mask is a vector of core numbers to set the thread's affinity
+to.
+
+The current core affinity can be retrieved using the member function:
+
+- gr_block::processor_affinity()
+
+If the affinity is set before the flowgraph is started, the scheduler
+will apply it when the block's thread is started. If the flowgraph is
+already running, the block's affinity is set immediately.
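+
+A minimal sketch of how this might look from Python, assuming these
+member functions are exposed on the block and that cores 0 and 1 exist
+on the machine:
+
+\code
+    # Pin the block to cores 0 and 1 before starting the flowgraph
+    blk.set_processor_affinity([0, 1])
+    tb.start()
+
+    # ... later, hand the thread back to the kernel scheduler
+    blk.unset_processor_affinity()
+\endcode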
+
+*/
diff --git a/docs/doxygen/other/tv-channel-frequencies b/docs/doxygen/other/tv-channel-frequencies
new file mode 100644
index 000000000..8613124f7
--- /dev/null
+++ b/docs/doxygen/other/tv-channel-frequencies
@@ -0,0 +1,79 @@
+# These are the center frequencies in MHz for North American broadcast
+# TV channels. Each channel is 6 MHz wide, hence the bottom edge of the
+# channel is 3 MHz below the value in this table.
+#
+# For NTSC (Analog) TV, the picture carrier is 1.25 MHz up from the
+# bottom edge. NTSC has the FM audio at bottom + 5.75 and the spike is easily
+# visible.
+#
+# For ATSC, the pilot tone is 0.31 MHz up from the bottom.
+#
+#
+ 2 57.00
+ 3 63.00
+ 4 69.00
+ 5 79.00
+ 6 85.00
+ 7 177.00
+ 8 183.00
+ 9 189.00
+ 10 195.00
+ 11 201.00
+ 12 207.00
+ 13 213.00
+ 14 473.00
+ 15 479.00
+ 16 485.00
+ 17 491.00
+ 18 497.00
+ 19 503.00
+ 20 509.00
+ 21 515.00
+ 22 521.00
+ 23 527.00
+ 24 533.00
+ 25 539.00
+ 26 545.00
+ 27 551.00
+ 28 557.00
+ 29 563.00
+ 30 569.00
+ 31 575.00
+ 32 581.00
+ 33 587.00
+ 34 593.00
+ 35 599.00
+ 36 605.00
+ 37 611.00
+ 38 617.00
+ 39 623.00
+ 40 629.00
+ 41 635.00
+ 42 641.00
+ 43 647.00
+ 44 653.00
+ 45 659.00
+ 46 665.00
+ 47 671.00
+ 48 677.00
+ 49 683.00
+ 50 689.00
+ 51 695.00
+ 52 701.00
+ 53 707.00
+ 54 713.00
+ 55 719.00
+ 56 725.00
+ 57 731.00
+ 58 737.00
+ 59 743.00
+ 60 749.00
+ 61 755.00
+ 62 761.00
+ 63 767.00
+ 64 773.00
+ 65 779.00
+ 66 785.00
+ 67 791.00
+ 68 797.00
+ 69 803.00
diff --git a/docs/doxygen/other/vector_docstub.h b/docs/doxygen/other/vector_docstub.h
new file mode 100644
index 000000000..062e08d30
--- /dev/null
+++ b/docs/doxygen/other/vector_docstub.h
@@ -0,0 +1,16 @@
+namespace std
+{
+ /*!
+ * \brief vector documentation stub
+ *
+ * \warning
+ * This isn't the real vector template. It's just enough to get doxygen
+ * to draw pretty collaboration diagrams.
+ */
+ template<class T> class vector
+ {
+ public:
+ T *p;
+ };
+
+} // namespace std