Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
path: root/TurtleArt
diff options
context:
space:
mode:
Diffstat (limited to 'TurtleArt')
-rw-r--r--TurtleArt/RtfParser.py150
-rw-r--r--TurtleArt/audiograb.py622
-rw-r--r--TurtleArt/rfidutils.py123
-rw-r--r--TurtleArt/ringbuffer.py108
-rw-r--r--TurtleArt/tacamera.py42
-rw-r--r--TurtleArt/tacanvas.py227
-rw-r--r--TurtleArt/tacollaboration.py282
-rw-r--r--TurtleArt/taconstants.py176
-rw-r--r--TurtleArt/taexporthtml.py7
-rw-r--r--TurtleArt/talogo.py198
-rw-r--r--TurtleArt/taturtle.py4
-rw-r--r--TurtleArt/tautils.py36
-rw-r--r--TurtleArt/tawindow.py247
-rw-r--r--TurtleArt/v4l2.py1914
14 files changed, 672 insertions, 3464 deletions
diff --git a/TurtleArt/RtfParser.py b/TurtleArt/RtfParser.py
deleted file mode 100644
index 9a141a4..0000000
--- a/TurtleArt/RtfParser.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#Copyright (c) 2010, Loic Fejoz
-#Copyright (c) 2010, Walter Bender
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the
-# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-# Boston, MA 02111-1307, USA.
-
-
-import sys
-
-
-class RtfException(Exception):
- pass
-
-plaintext = 1
-control = 2
-argument = 3
-backslash = 4
-escapedChar = 5
-
-
-class RtfParser(object):
-
- def __init__(self, unicode=False):
- self.state = plaintext
- self.arg = ''
- self.token = ''
- self.unicode = unicode
- self.par = False
- self.output = ''
-
- def getChar(self, code):
- """ called when an escaped char is found """
- return chr(code)
-
- def getNonBreakingSpace(self):
- return ' '
-
- def pushState(self):
- pass
-
- def popState(self):
- pass
-
- def putChar(self):
- pass
-
- def doControl(self, token, arg):
- pass
-
- def feed(self, txt):
- for c in txt:
- self.feedChar(c)
-
- def feedChar(self, char):
- if self.state == plaintext: # this is just normal user content
- if char == '\\':
- self.state = backslash
- elif char == '{':
- self.pushState()
- elif char == '}':
- self.popState()
- else:
- self.putChar(char)
- elif self.state == backslash: # a command or escape
- if char == '\\' or char == '{' or char == '}':
- self.putChar(char)
- self.state = plaintext
- else:
- if char.isalpha() or char in ('*', '-', '|'):
- self.state = control
- self.token = char
- elif char == "'":
- self.state = escapedChar
- self.escapedChar = ''
- elif char in ['\\', '{', '}']:
- self.putChar(char)
- self.state = plaintext
- elif char == "~": # non breaking space
- self.putChar(self.getNonBreakingSpace())
- self.state = plaintext
- else:
- raise RtfException(('unexpected %s after \\' % char))
- elif self.state == escapedChar:
- self.escapedChar = self.escapedChar + char
- if len(self.escapedChar) == 2:
- char = self.getChar(int(self.escapedChar, 16))
- self.putChar(char)
- self.state = plaintext
- elif self.state == control: # collecting the command token
- if char.isalpha():
- self.token = self.token + char
- elif char.isdigit() or char == '-':
- self.state = argument
- self.arg = char
- else:
- self.doControl(self.token, self.arg)
- self.state = plaintext
- if char == '\\':
- self.state = backslash
- elif char == '{':
- self.pushState()
- elif char == '}':
- self.popState()
- else:
- if not char.isspace():
- self.putChar(char)
- elif self.state == argument: # collecting the optional argument
- if char.isdigit():
- self.arg = self.arg + char
- else:
- self.state = plaintext
- self.doControl(self.token, self.arg)
- if char == '\\':
- self.state = backslash
- elif char == '{':
- self.pushState()
- elif char == '}':
- self.popState()
- else:
- if not char.isspace():
- self.putChar(char)
-
-
-class RtfTextOnly(RtfParser):
-
- def __init__(self):
- RtfParser.__init__(self)
- self.level = 0
-
- def pushState(self):
- self.level = self.level + 1
-
- def popState(self):
- self.level = self.level - 1
-
- def putChar(self, ch):
- if self.par:
- self.output += ch
-
- def doControl(self, token, arg):
- if token[0:3] == 'par':
- self.par = True
- pass
diff --git a/TurtleArt/audiograb.py b/TurtleArt/audiograb.py
deleted file mode 100644
index 3ecdc11..0000000
--- a/TurtleArt/audiograb.py
+++ /dev/null
@@ -1,622 +0,0 @@
-#! /usr/bin/python
-#
-# Author: Arjun Sarwal arjun@laptop.org
-# Copyright (C) 2007, Arjun Sarwal
-# Copyright (C) 2009,10 Walter Bender
-# Copyright (C) 2009, Benjamin Berg, Sebastian Berg
-# Copyright (C) 2009, Sayamindu Dasgupta
-# Copyright (C) 2010, Sascha Silbe
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-import pygst
-import gst
-import gst.interfaces
-from numpy import fromstring
-import os
-import subprocess
-from string import find
-from threading import Timer
-
-# Initial device settings
-RATE = 48000
-MIC_BOOST = True
-DC_MODE_ENABLE = False
-CAPTURE_GAIN = 50
-BIAS = True
-
-# Setting on quit
-QUIT_MIC_BOOST = False
-QUIT_DC_MODE_ENABLE = False
-QUIT_CAPTURE_GAIN = 100
-QUIT_BIAS = True
-
-import logging
-
-_logger = logging.getLogger('TurtleArt')
-_logger.setLevel(logging.DEBUG)
-logging.basicConfig()
-
-from taconstants import SENSOR_AC_NO_BIAS, SENSOR_AC_BIAS, SENSOR_DC_NO_BIAS, \
- SENSOR_DC_BIAS, XO1
-
-
-class AudioGrab:
- """ The interface between measure and the audio device """
-
- def __init__(self, callable1, activity):
- """ Initialize the class: callable1 is a data buffer;
- activity is the parent class"""
-
- self.callable1 = callable1
- self.activity = activity
- self.sensor = None
-
- self.temp_buffer = [0]
- self.picture_buffer = [] # place to hold screen grabs
-
- self.draw_graph_status = False
- self.screenshot = True
-
- self.rate = 48000
- self.final_count = 0
- self.count_temp = 0
- self.entry_count = 0
-
- self.counter_buffer = 0
-
- self._dc_control = None
- self._mic_bias_control = None
- self._capture_control = None
- self._mic_boost_control = None
- self._hardwired = False # Query controls or use hardwired names
-
- # Set up gst pipeline
- self.pipeline = gst.Pipeline("pipeline")
- self.alsasrc = gst.element_factory_make("alsasrc", "alsa-source")
- self.pipeline.add(self.alsasrc)
- self.caps1 = gst.element_factory_make("capsfilter", "caps1")
- self.pipeline.add(self.caps1)
- caps_str = "audio/x-raw-int,rate=%d,channels=1,depth=16" % (RATE)
- self.caps1.set_property("caps", gst.caps_from_string(caps_str))
- self.fakesink = gst.element_factory_make("fakesink", "fsink")
- self.pipeline.add(self.fakesink)
- self.fakesink.connect("handoff", self.on_buffer)
- self.fakesink.set_property("signal-handoffs", True)
- gst.element_link_many(self.alsasrc, self.caps1, self.fakesink)
-
- self.dont_queue_the_buffer = False
-
- self._mixer = gst.element_factory_make('alsamixer')
- rc = self._mixer.set_state(gst.STATE_PAUSED)
- assert rc == gst.STATE_CHANGE_SUCCESS
-
- # Query the available controls
- try: # F11+
- _logger.debug('controls: %r', [t.props.untranslated_label \
- for t in self._mixer.list_tracks()])
- self._dc_control = self._find_control(['dc mode'])
- self._mic_bias_control = self._find_control(['mic bias',
- 'dc input bias',
- 'v_refout'])
- self._mic_boost_control = self._find_control(['mic boost',
- 'mic boost (+20db)',
- 'internal mic boost',
- 'analog mic boost'])
- self._mic_gain_control = self._find_control(['mic'])
- self._capture_control = self._find_control(['capture'])
- self._master_control = self._find_control(['master'])
- except AttributeError: # F9- (no untranslated_label attribute)
- self._hardwired = True
-
- # Variables for saving and resuming state of sound device
- self.master = self.get_master()
- self.bias = BIAS
- self.dcmode = DC_MODE_ENABLE
- self.capture_gain = CAPTURE_GAIN
- self.mic_boost = MIC_BOOST
- self.mic = self.get_mic_gain()
-
- # Timer for interval sampling and switch to indicate when to capture
- self.capture_timer = None
- self.capture_interval_sample = False
-
- def set_handoff_signal(self, handoff_state):
- """Sets whether the handoff signal would generate an interrupt or not"""
- self.fakesink.set_property("signal-handoffs", handoff_state)
-
- def _new_buffer(self, buf):
- """ Use a new buffer """
- if not self.dont_queue_the_buffer:
- self.temp_buffer = buf
- self.callable1(buf)
- else:
- pass
-
- def on_buffer(self, element, buffer, pad):
- """The function that is called whenever new data is available
- This is the signal handler for the handoff signal"""
- if buffer is None:
- _logger.debug('audiograb buffer is None')
- return False
-
- temp_buffer = fromstring(buffer, 'int16')
- if not self.dont_queue_the_buffer:
- self._new_buffer(temp_buffer)
- return False
-
- def set_sensor(self, sensor):
- """Keep a reference to the sensot toolbar for logging"""
- self.sensor = sensor
-
- def start_sound_device(self):
- """Start or Restart grabbing data from the audio capture"""
- gst.event_new_flush_start()
- self.pipeline.set_state(gst.STATE_PLAYING)
-
- def stop_sound_device(self):
- """Stop grabbing data from capture device"""
- gst.event_new_flush_stop()
- self.pipeline.set_state(gst.STATE_NULL)
-
- def set_sampling_rate(self, sr):
- """Sets the sampling rate of the capture device
- Sampling rate must be given as an integer for example 16000 for
- setting 16Khz sampling rate
- The sampling rate would be set in the device to the nearest available"""
- self.pause_grabbing()
- caps_str = "audio/x-raw-int,rate=%d,channels=1,depth=16" % (sr, )
- self.caps1.set_property("caps", gst.caps_from_string(caps_str))
- self.resume_grabbing()
-
- def get_sampling_rate(self):
- """Gets the sampling rate of the capture device"""
- return int(self.caps1.get_property("caps")[0]['rate'])
-
- def set_callable1(self, callable1):
- """Sets the callable to the drawing function for giving the
- data at the end of idle-add"""
- self.callable1 = callable1
-
- def start_grabbing(self):
- """Called right at the start of the Activity"""
- self.start_sound_device()
-
- def pause_grabbing(self):
- """When Activity goes into background"""
- self.save_state()
- self.stop_sound_device()
-
- def resume_grabbing(self):
- """When Activity becomes active after going to background"""
- self.start_sound_device()
- self.resume_state()
-
- def stop_grabbing(self):
- """Not used ???"""
- self.stop_sound_device()
- self.set_handoff_signal(False)
-
- def _find_control(self, prefixes):
- """Try to find a mixer control matching one of the prefixes.
-
- The control with the best match (smallest difference in length
- between label and prefix) will be returned. If no match is found,
- None is returned.
- """
- def best_prefix(label, prefixes):
- matches =\
- [len(label) - len(p) for p in prefixes if label.startswith(p)]
- if not matches:
- return None
-
- matches.sort()
- return matches[0]
-
- controls = []
- for track in self._mixer.list_tracks():
- label = track.props.untranslated_label.lower()
- diff = best_prefix(label, prefixes)
- if diff is not None:
- controls.append((track, diff))
-
- controls.sort(key=lambda e: e[1])
- if controls:
- _logger.debug("found control: %s" %\
- (str(controls[0][0].props.untranslated_label)))
- return controls[0][0]
-
- return None
-
- def save_state(self):
- """Saves the state of all audio controls"""
- self.master = self.get_master()
- self.bias = self.get_bias()
- self.dcmode = self.get_dc_mode()
- self.capture_gain = self.get_capture_gain()
- self.mic_boost = self.get_mic_boost()
-
- def resume_state(self):
- """Put back all audio control settings from the saved state"""
- self.set_master(self.master)
- self.set_bias(self.bias)
- self.set_dc_mode(self.dcmode)
- self.set_capture_gain(self.capture_gain)
- self.set_mic_boost(self.mic_boost)
-
- def _get_mute(self, control, name, default):
- """Get mute status of a control"""
- if not control:
- return default
-
- value = bool(control.flags & gst.interfaces.MIXER_TRACK_MUTE)
- _logger.debug('Getting %s (%s) mute status: %r', name,
- control.props.untranslated_label, value)
- return value
-
- def _set_mute(self, control, name, value):
- """Mute a control"""
- if not control:
- return
-
- self._mixer.set_mute(control, value)
- _logger.debug('Set mute for %s (%s) to %r', name,
- control.props.untranslated_label, value)
-
- def _get_volume(self, control, name):
- """Get volume of a control and convert to a scale of 0-100"""
- if not control:
- return 100
-
- try: # sometimes get_volume does not return a tuple
- hw_volume = self._mixer.get_volume(control)[0]
- except IndexError:
- return 100
-
- min_vol = control.min_volume
- max_vol = control.max_volume
- percent = (hw_volume - min_vol)*100//(max_vol - min_vol)
- return percent
-
- def _set_volume(self, control, name, value):
- """Sets the level of a control on a scale of 0-100"""
- if not control:
- return
-
- # convert value to scale of control
- min_vol = control.min_volume
- max_vol = control.max_volume
- if min_vol != max_vol:
- hw_volume = value*(max_vol - min_vol)//100 + min_vol
- self._mixer.set_volume(control, (hw_volume,)*control.num_channels)
-
- def amixer_set(self, control, state):
- """ Direct call to amixer for old systems. """
- if state:
- os.system("amixer set '%s' unmute" % (control))
- else:
- os.system("amixer set '%s' mute" % (control))
-
- def mute_master(self):
- """Mutes the Master Control"""
- if not self._hardwired and self.activity.hw != XO1:
- self._set_mute(self._master_control, 'Master', True)
- else:
- self.amixer_set('Master', False)
-
- def unmute_master(self):
- """Unmutes the Master Control"""
- if not self._hardwired and self.activity.hw != XO1:
- self._set_mute(self._master_control, 'Master', True)
- else:
- self.amixer_set('Master', True)
-
- def set_master(self, master_val):
- """Sets the Master gain slider settings
- master_val must be given as an integer between 0 and 100 indicating the
- percentage of the slider to be set"""
- if not self._hardwired:
- self._set_volume(self._master_control, 'Master', master_val)
- else:
- os.system("amixer set Master " + str(master_val) + "%")
-
- def get_master(self):
- """Gets the Master gain slider settings. The value returned is an
- integer between 0-100 and is an indicative of the percentage 0 - 100%"""
- if not self._hardwired:
- return self._get_volume(self._master_control, 'master')
- else:
- p = str(subprocess.Popen(["amixer", "get", "Master"],
- stdout=subprocess.PIPE).communicate()[0])
- p = p[find(p, "Front Left:"):]
- p = p[find(p, "[")+1:]
- p = p[:find(p, "%]")]
- return int(p)
-
- def set_bias(self, bias_state=False):
- """Enables / disables bias voltage."""
- if not self._hardwired and self.activity.hw != XO1:
- if self._mic_bias_control is None:
- return
- # if not isinstance(self._mic_bias_control,
- # gst.interfaces.MixerOptions):
- if self._mic_bias_control not in self._mixer.list_tracks():
- return self._set_mute(self._mic_bias_control, 'Mic Bias',
- not bias_state)
-
- # We assume that values are sorted from lowest (=off) to highest.
- # Since they are mixed strings ("Off", "50%", etc.), we cannot
- # easily ensure this by sorting with the default sort order.
- try:
- if bias_state:
- # self._mixer.set_option(self._mic_bias_control, values[-1])
- self._mixer.set_volume(self._mic_bias_control,
- self._mic_bias_control.max_volume)
- else:
- self._mixer.set_volume(self._mic_bias_control,
- self._mic_bias_control.min_volume)
- # self._mixer.set_option(self._mic_bias_control, values[0])
- except TypeError:
- self._set_mute(self._mic_bias_control, 'Mic Bias',
- not bias_state)
- elif self._hardwired:
- self.amixer_set('V_REFOUT Enable', bias_state)
- else:
- self.amixer_set('MIC Bias Enable', bias_state)
-
- def get_bias(self):
- """Check whether bias voltage is enabled."""
- if not self._hardwired:
- if self._mic_bias_control is None:
- return False
- if self._mic_bias_control not in self._mixer.list_tracks():
- return not self._get_mute(self._mic_bias_control, 'Mic Bias',
- False)
- current = self._mixer.get_volume(self._mic_bias_control)
- # same ordering assertion as in set_bias() applies
- # if current == values[0]:
- if current == self._mic_bias_control.min_volume:
- return False
- return True
- else:
- p = str(subprocess.Popen(["amixer", "get", "'V_REFOUT Enable'"],
- stdout=subprocess.PIPE).communicate()[0])
- p = p[find(p, "Mono:"):]
- p = p[find(p, "[")+1:]
- p = p[:find(p, "]")]
- if p == "on":
- return True
- return False
-
- def set_dc_mode(self, dc_mode=False):
- """Sets the DC Mode Enable control
- pass False to mute and True to unmute"""
- if not self._hardwired and self.activity.hw != XO1:
- if self._dc_control is not None:
- self._set_mute(self._dc_control, 'DC mode', not dc_mode)
- else:
- self.amixer_set('DC Mode Enable', dc_mode)
-
- def get_dc_mode(self):
- """Returns the setting of DC Mode Enable control
- i .e. True: Unmuted and False: Muted"""
- if not self._hardwired:
- if self._dc_control is not None:
- return not self._get_mute(self._dc_control, 'DC mode', False)
- else:
- return False
- else:
- p = str(subprocess.Popen(["amixer", "get", "'DC Mode Enable'"],
- stdout=subprocess.PIPE).communicate()[0])
- p = p[find(p, "Mono:"):]
- p = p[find(p, "[")+1:]
- p = p[:find(p, "]")]
- if p == "on":
- return True
- else:
- return False
-
- def set_mic_boost(self, mic_boost=False):
- """Set Mic Boost.
- True = +20dB, False = 0dB"""
- if not self._hardwired:
- if self._mic_boost_control is None:
- return
- if self._mic_boost_control not in self._mixer.list_tracks():
- return self._set_mute(self._mic_boost_control, 'Mic Boost',
- mic_boost)
- value = self._mixer.get_volume(self._mic_boost_control)
- try:
- if mic_boost:
- self._mixer.set_volume(self._mic_boost_control,
- self._mic_boost_control.max_volume)
- else:
- self._mixer.set_volume(self._mic_boost_control,
- self._mic_boost_control.min_volume)
- except TypeError:
- return self._set_mute(self._mic_boost_control, 'Mic Boost',
- not mic_boost)
- else:
- self.amixer_set('Mic Boost (+20dB)', mic_boost)
-
- def get_mic_boost(self):
- """Return Mic Boost setting.
- True = +20dB, False = 0dB"""
- if not self._hardwired:
- if self._mic_boost_control is None:
- return False
- if self._mic_boost_control not in self._mixer.list_tracks():
- return self._get_mute(self._mic_boost_control, 'Mic Boost',
- False)
- current = self._mixer.get_volume(self._mic_boost_control)
- _logger.debug('current: %s' % (str(current)))
- if current != self._mic_boost_control.min_volume:
- return True
- return False
- else:
- p = str(subprocess.Popen(["amixer", "get", "'Mic Boost (+20dB)'"],
- stdout=subprocess.PIPE).communicate()[0])
- p = p[find(p, "Mono:"):]
- p = p[find(p, "[")+1:]
- p = p[:find(p, "]")]
- if p == "on":
- return True
- else:
- return False
-
- def set_capture_gain(self, capture_val):
- """Sets the Capture gain slider settings
- capture_val must be given as an integer between 0 and 100 indicating the
- percentage of the slider to be set"""
- if not self._hardwired and self.activity.hw != XO1:
- if self._capture_control is not None:
- self._set_volume(self._capture_control, 'Capture', capture_val)
- else:
- os.system("amixer set Capture " + str(capture_val) + "%")
-
- def get_capture_gain(self):
- """Gets the Capture gain slider settings. The value returned is an
- integer between 0-100 and is an indicative of the percentage 0 - 100%"""
- if not self._hardwired:
- if self._capture_control is not None:
- return self._get_volume(self._capture_control, 'Capture')
- else:
- return 0
- else:
- p = str(subprocess.Popen(["amixer", "get", "Capture"],
- stdout=subprocess.PIPE).communicate()[0])
- p = p[find(p, "Front Left:"):]
- p = p[find(p, "[")+1:]
- p = p[:find(p, "%]")]
- return int(p)
-
- def set_mic_gain(self, mic_val):
- """Sets the MIC gain slider settings
- mic_val must be given as an integer between 0 and 100 indicating the
- percentage of the slider to be set"""
- if not self._hardwired and self.activity.hw != XO1:
- self._set_volume(self._mic_gain_control, 'Mic', mic_val)
- else:
- os.system("amixer set Mic " + str(mic_val) + "%")
-
- def get_mic_gain(self):
- """Gets the MIC gain slider settings. The value returned is an
- integer between 0-100 and is an indicative of the percentage 0 - 100%"""
- if not self._hardwired:
- return self._get_volume(self._mic_gain_control, 'Mic')
- else:
- p = str(subprocess.Popen(["amixer", "get", "Mic"],
- stdout=subprocess.PIPE).communicate()[0])
- try:
- p = p[find(p, "Mono:"):]
- p = p[find(p, "[")+1:]
- p = p[:find(p, "%]")]
- return int(p)
- except:
- return(0)
-
- def set_sensor_type(self, sensor_type=SENSOR_AC_BIAS):
- """Set the type of sensor you want to use. Set sensor_type according
- to the following
- SENSOR_AC_NO_BIAS - AC coupling with Bias Off --> Very rarely used.
- Use when connecting a dynamic microphone externally
- SENSOR_AC_BIAS - AC coupling with Bias On --> The default settings.
- The internal MIC uses these
- SENSOR_DC_NO_BIAS - DC coupling with Bias Off --> measuring voltage
- output sensor. For example LM35 which gives output proportional
- to temperature
- SENSOR_DC_BIAS - DC coupling with Bias On --> measuing resistance.
- """
- PARAMETERS = {
- SENSOR_AC_NO_BIAS: (False, False, 50, True),
- SENSOR_AC_BIAS: (False, True, 40, True),
- SENSOR_DC_NO_BIAS: (True, False, 0, False),
- SENSOR_DC_BIAS: (True, True, 0, False)
- }
- mode, bias, gain, boost = PARAMETERS[sensor_type]
- _logger.debug("====================================")
- _logger.debug("Set Sensor Type to %s" % (str(sensor_type)))
- self._set_sensor_type(mode, bias, gain, boost)
- _logger.debug("====================================")
-
- def _set_sensor_type(self, mode=None, bias=None, gain=None, boost=None):
- """Helper to modify (some) of the sensor settings."""
- if mode is not None:
- self.set_dc_mode(mode)
- if self._dc_control is not None:
- os.system("amixer get '%s'" %\
- (self._dc_control.props.untranslated_label))
- if bias is not None:
- self.set_bias(bias)
- if self._mic_bias_control is not None:
- os.system("amixer get '%s'" %\
- (self._mic_bias_control.props.untranslated_label))
- if gain is not None:
- self.set_capture_gain(gain)
- if self._capture_control is not None:
- os.system("amixer get '%s'" %\
- (self._capture_control.props.untranslated_label))
- if boost is not None:
- self.set_mic_boost(boost)
- if self._mic_boost_control is not None:
- os.system("amixer get '%s'" %\
- (self._mic_boost_control.props.untranslated_label))
-
- def on_activity_quit(self):
- """When Activity quits"""
- self.set_mic_boost(QUIT_MIC_BOOST)
- self.set_dc_mode(QUIT_DC_MODE_ENABLE)
- self.set_capture_gain(QUIT_CAPTURE_GAIN)
- self.set_bias(QUIT_BIAS)
- self.stop_sound_device()
-
-
-class AudioGrab_XO1(AudioGrab):
- """ Use default parameters for OLPC XO 1.0 laptop """
- pass
-
-
-class AudioGrab_XO15(AudioGrab):
- """ Override parameters for OLPC XO 1.5 laptop """
- def set_sensor_type(self, sensor_type=SENSOR_AC_BIAS):
- """Helper to modify (some) of the sensor settings."""
- PARAMETERS = {
- SENSOR_AC_NO_BIAS: (False, False, 80, True),
- SENSOR_AC_BIAS: (False, True, 80, True),
- SENSOR_DC_NO_BIAS: (True, False, 80, False),
- SENSOR_DC_BIAS: (True, True, 90, False)
- }
- _logger.debug("====================================")
- _logger.debug("Set Sensor Type to %s" % (str(sensor_type)))
- mode, bias, gain, boost = PARAMETERS[sensor_type]
- self._set_sensor_type(mode, bias, gain, boost)
- _logger.debug("====================================")
-
-
-class AudioGrab_Unknown(AudioGrab):
- """ Override parameters for generic hardware """
- def set_sensor_type(self, sensor_type=SENSOR_AC_BIAS):
- """Helper to modify (some) of the sensor settings."""
- PARAMETERS = {
- SENSOR_AC_NO_BIAS: (None, False, 50, True),
- SENSOR_AC_BIAS: (None, True, 40, True),
- SENSOR_DC_NO_BIAS: (True, False, 80, False),
- SENSOR_DC_BIAS: (True, True, 90, False)
- }
- _logger.debug("====================================")
- _logger.debug("Set Sensor Type to %s" % (str(sensor_type)))
- mode, bias, gain, boost = PARAMETERS[sensor_type]
- self._set_sensor_type(mode, bias, gain, boost)
- _logger.debug("====================================")
diff --git a/TurtleArt/rfidutils.py b/TurtleArt/rfidutils.py
deleted file mode 100644
index f2c74b4..0000000
--- a/TurtleArt/rfidutils.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# utils.py - Helper functions for tis2000.py
-# Copyright (C) 2010 Emiliano Pastorino <epastorino@plan.ceibal.edu.uy>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import logging
-
-def find_device():
- """
- Search for devices.
- Return a device instance or None.
- """
- device = None
- for i in os.listdir(os.path.join('.', 'devices')):
- if not os.path.isdir(os.path.join('.', 'devices', i)):
- try:
- _tempmod = __import__('devices.%s'%i.split('.')[0], globals(),
- locals(), ['RFIDReader'], -1)
- devtemp = _tempmod.RFIDReader()
- if devtemp.get_present() == True:
- device = devtemp
- except Exception, e:
- # logging.error("FIND_DEVICE: %s: %s"%(i, e))
- pass
- if device is None:
- logging.debug("No RFID device found")
- return device
-
-
-def strhex2bin(strhex):
- """
- Convert a string representing an hex value into a
- string representing the same value in binary format.
- """
- dic = { '0':"0000",
- '1':"0001",
- '2':"0010",
- '3':"0011",
- '4':"0100",
- '5':"0101",
- '6':"0110",
- '7':"0111",
- '8':"1000",
- '9':"1001",
- 'A':"1010",
- 'B':"1011",
- 'C':"1100",
- 'D':"1101",
- 'E':"1110",
- 'F':"1111"
- }
- binstr = ""
- for i in strhex:
- binstr = binstr + dic[i.upper()]
- return binstr
-
-def strbin2dec(strbin):
- """
- Convert a string representing a binary value into a
- string representing the same value in decimal format.
- """
- strdec = "0"
- for i in range(1, strbin.__len__()+1):
- strdec = str(int(strdec)+int(strbin[-i])*int(pow(2, i-1)))
- return strdec
-
-def dec2bin(ndec):
- """
- Convert a decimal number into a string representing
- the same value in binary format.
- """
- if ndec < 1:
- return "0"
- binary = []
- while ndec != 0:
- binary.append(ndec%2)
- ndec = ndec/2
- strbin = ""
- binary.reverse()
- for i in binary:
- strbin = strbin+str(i)
- return strbin
-
-def bin2hex(strbin):
- """
- Convert a string representing a binary number into a string
- representing the same value in hexadecimal format.
- """
- dic = { "0000":"0",
- "0001":"1",
- "0010":"2",
- "0011":"3",
- "0100":"4",
- "0101":"5",
- "0110":"6",
- "0111":"7",
- "1000":"8",
- "1001":"9",
- "1010":"A",
- "1011":"B",
- "1100":"C",
- "1101":"D",
- "1110":"E",
- "1111":"F"
- }
- while strbin.__len__()%4 != 0:
- strbin = '0' + strbin
- strh = ""
- for i in range(0, strbin.__len__()/4):
- strh = strh + dic[str(strbin[i*4:i*4+4])]
- return strh
diff --git a/TurtleArt/ringbuffer.py b/TurtleArt/ringbuffer.py
deleted file mode 100644
index 2afb5c9..0000000
--- a/TurtleArt/ringbuffer.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (C) 2009, Benjamin Berg, Sebastian Berg
-# Copyright (C) 2010, Walter Bender
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-import numpy as np
-
-
-class RingBuffer1d(object):
- """This class implements an array being written in as a ring and that can
- be read from continuously ending with the newest data or starting with the
- oldest. It returns a numpy array copy of the data;
- """
-
- def __init__(self, length, dtype=None):
- """Initialize the 1 dimensional ring buffer with the given lengths.
- The initial values are all 0s
- """
- self.offset = 0
-
- self._data = np.zeros(length, dtype=dtype)
-
- self.stored = 0
-
- def fill(self, number):
- self._data.fill(number)
- self.offset = 0
-
- def append(self, data):
- """Append to the ring buffer (and overwrite old data). If len(data)
- is greater then the ring buffers length, the newest data takes
- precedence.
- """
- data = np.asarray(data)
-
- if len(self._data) == 0:
- return
-
- if len(data) >= len(self._data):
- self._data[:] = data[-len(self._data):]
- self.offset = 0
- self.stored = len(self._data)
-
- elif len(self._data) - self.offset >= len(data):
- self._data[self.offset: self.offset + len(data)] = data
- self.offset = self.offset + len(data)
- self.stored += len(data)
- else:
- self._data[self.offset:] = data[:len(self._data) - self.offset]
- self._data[:len(data) - (len(self._data) - self.offset)] = \
- data[-len(data) + (len(self._data) - self.offset):]
- self.offset = len(data) - (len(self._data) - self.offset)
- self.stored += len(data)
-
- if len(self._data) <= self.stored:
- self.read = self._read
-
- def read(self, number=None, step=1):
- """Read the ring Buffer. Number can be positive or negative.
- Positive values will give the latest information, negative values will
- give the newest added information from the buffer. (in normal order)
-
- Before the buffer is filled once: This returns just None
- """
- return np.array([])
-
- def _read(self, number=None, step=1):
- """Read the ring Buffer. Number can be positive or negative.
- Positive values will give the latest information, negative values will
- give the newest added information from the buffer. (in normal order)
- """
- if number == None:
- number = len(self._data) // step
-
- number *= step
- assert abs(number) <= len(self._data), \
- 'Number to read*step must be smaller then length'
-
- if number < 0:
- if abs(number) <= self.offset:
- return self._data[self.offset + number:self.offset:step]
-
- spam = (self.offset - 1) % step
-
- return np.concatenate(
- (self._data[step - spam - 1 + self.offset + number::step],
- self._data[spam:self.offset:step]))
-
- if number - (len(self._data) - self.offset) > 0:
- spam = ((self.offset + number) - self.offset - 1) % step
- return np.concatenate(
- (self._data[self.offset:self.offset + number:step],
- self._data[spam:number -
- (len(self._data) - self.offset):step]))
-
- return self._data[self.offset:self.offset + number:step].copy()
diff --git a/TurtleArt/tacamera.py b/TurtleArt/tacamera.py
deleted file mode 100644
index 2177288..0000000
--- a/TurtleArt/tacamera.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-#Copyright (c) 2010, Walter Bender
-#Copyright (c) 2010, Tony Forster
-
-#Permission is hereby granted, free of charge, to any person obtaining a copy
-#of this software and associated documentation files (the "Software"), to deal
-#in the Software without restriction, including without limitation the rights
-#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-#copies of the Software, and to permit persons to whom the Software is
-#furnished to do so, subject to the following conditions:
-
-#The above copyright notice and this permission notice shall be included in
-#all copies or substantial portions of the Software.
-
-#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-#THE SOFTWARE.
-
-import gst, time
-
-GST_PIPE = ['v4l2src', 'ffmpegcolorspace', 'pngenc']
-
-class Camera():
- """ A class for representing the video camera """
-
- def __init__(self, imagepath):
- GST_PIPE.append('filesink location=%s' % imagepath)
- self.pipe = gst.parse_launch('!'.join(GST_PIPE))
- self.bus = self.pipe.get_bus()
-
- def save_camera_input_to_file(self):
- """ Grab a frame from the camera """
- self.pipe.set_state(gst.STATE_PLAYING)
- self.bus.poll(gst.MESSAGE_EOS, -1)
-
- def stop_camera_input(self):
- self.pipe.set_state(gst.STATE_NULL)
-
diff --git a/TurtleArt/tacanvas.py b/TurtleArt/tacanvas.py
index d4395a2..79bdd04 100644
--- a/TurtleArt/tacanvas.py
+++ b/TurtleArt/tacanvas.py
@@ -1,5 +1,5 @@
#Copyright (c) 2007-8, Playful Invention Company.
-#Copyright (c) 2008-10, Walter Bender
+#Copyright (c) 2008-11, Walter Bender
#Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
#Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -24,10 +24,11 @@ import gtk
from math import sin, cos, pi
import pango
import cairo
+import base64
from sprites import Sprite
from tasprite_factory import SVG
-from tautils import image_to_base64, data_to_string, round_int
+from tautils import image_to_base64, get_path, data_to_string, round_int
from taconstants import CANVAS_LAYER, BLACK, WHITE
import logging
@@ -43,6 +44,23 @@ def wrap100(n):
return n
+def calc_poly_bounds(poly_points):
+ """ Calculate the minx, miny, width, height of polygon """
+ minx = poly_points[0][0]
+ miny = poly_points[0][1]
+ maxx, maxy = minx, miny
+ for p in poly_points:
+ if p[0] < minx:
+ minx = p[0]
+ elif p[0] > maxx:
+ maxx = p[0]
+ if p[1] < miny:
+ miny = p[1]
+ elif p[1] > maxy:
+ maxy = p[1]
+ return(minx, miny, maxx - minx, maxy - miny)
+
+
def calc_shade(c, s, invert=False):
""" Convert a color to the current shade (lightness/darkness). """
# Assumes 16 bit input values
@@ -148,27 +166,24 @@ class TurtleGraphics:
self.fill = False
if len(self.poly_points) == 0:
return
- minx = self.poly_points[0][0]
- miny = self.poly_points[0][1]
- maxx = minx
- maxy = miny
- for p in self.poly_points:
- if p[0] < minx:
- minx = p[0]
- elif p[0] > maxx:
- maxx = p[0]
- if p[1] < miny:
- miny = p[1]
- elif p[1] > maxy:
- maxy = p[1]
- w = maxx - minx
- h = maxy - miny
- self.canvas.images[0].draw_polygon(self.gc, True, self.poly_points)
+ self.fill_polygon(self.poly_points)
+ if self.tw.sharing():
+ shared_poly_points = []
+ for p in self.poly_points:
+ shared_poly_points.append((self.screen_to_turtle_coordinates(
+ p[0], p[1])))
+ event = "F|%s" % (data_to_string([self._get_my_nick(),
+ shared_poly_points]))
+ self.tw.send_event(event)
+ self.poly_points = []
+
+ def fill_polygon(self, poly_points):
+ minx, miny, w, h = calc_poly_bounds(poly_points)
+ self.canvas.images[0].draw_polygon(self.gc, True, poly_points)
self.invalt(minx - self.pensize * self.tw.coord_scale / 2 - 3,
miny - self.pensize * self.tw.coord_scale / 2 - 3,
w + self.pensize * self.tw.coord_scale + 6,
h + self.pensize * self.tw.coord_scale + 6)
- self.poly_points = []
def clearscreen(self, share=True):
"""Clear the canvas and reset most graphics attributes to defaults."""
@@ -215,13 +230,14 @@ class TurtleGraphics:
self.move_turtle()
if self.tw.saving_svg and self.pendown:
self.tw.svg_string += self.svg.new_path(oldx,
- self.height / 2 - oldy)
+ self.invert_y_coordinate(oldy))
self.tw.svg_string += self.svg.line_to(self.xcor,
- self.height / 2 - self.ycor)
+ self.invert_y_coordinate(self.ycor))
self.tw.svg_string += "\"\n"
self.tw.svg_string += self.svg.style()
- event = "f|%s" % (data_to_string([self._get_my_nick(), int(n)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "f|%s" % (data_to_string([self._get_my_nick(), int(n)]))
+ self.tw.send_event(event)
def seth(self, n, share=True):
""" Set the turtle heading. """
@@ -232,8 +248,10 @@ class TurtleGraphics:
return
self.heading %= 360
self.turn_turtle()
- event = "r|%s" % (data_to_string([self._get_my_nick(), round_int(self.heading)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "r|%s" % (data_to_string([self._get_my_nick(),
+ round_int(self.heading)]))
+ self.tw.send_event(event)
def right(self, n, share=True):
""" Rotate turtle clockwise """
@@ -244,8 +262,10 @@ class TurtleGraphics:
return
self.heading %= 360
self.turn_turtle()
- event = "r|%s" % (data_to_string([self._get_my_nick(), round_int(self.heading)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "r|%s" % (data_to_string([self._get_my_nick(),
+ round_int(self.heading)]))
+ self.tw.send_event(event)
def arc(self, a, r, share=True):
""" Draw an arc """
@@ -260,8 +280,10 @@ class TurtleGraphics:
_logger.debug("bad value sent to %s" % (__name__))
return
self.move_turtle()
- event = "a|%s" % (data_to_string([self._get_my_nick(), [round_int(a), round_int(r)]]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "a|%s" % (data_to_string([self._get_my_nick(),
+ [round_int(a), round_int(r)]]))
+ self.tw.send_event(event)
def rarc(self, a, r):
""" draw a clockwise arc """
@@ -274,8 +296,7 @@ class TurtleGraphics:
oldx, oldy = self.xcor, self.ycor
cx = self.xcor + r * cos(self.heading * DEGTOR)
cy = self.ycor - r * sin(self.heading * DEGTOR)
- x = self.width / 2 + int(cx - r)
- y = self.height / 2 - int(cy + r)
+ x, y = self.turtle_to_screen_coordinates(int(cx - r), int(cy + r))
w = int(2 * r)
h = w
if self.pendown:
@@ -290,9 +311,10 @@ class TurtleGraphics:
self.ycor = cy + r * sin(self.heading * DEGTOR)
if self.tw.saving_svg and self.pendown:
self.tw.svg_string += self.svg.new_path(oldx,
- self.height / 2 - oldy)
+ self.invert_y_coordinate(oldx))
self.tw.svg_string += self.svg.arc_to(self.xcor,
- self.height / 2 - self.ycor, r, a, 0, s)
+ self.invert_y_coordinate(self.ycor),
+ r, a, 0, s)
self.tw.svg_string += "\"\n"
self.tw.svg_string += self.svg.style()
@@ -307,8 +329,7 @@ class TurtleGraphics:
oldx, oldy = self.xcor, self.ycor
cx = self.xcor - r * cos(self.heading * DEGTOR)
cy = self.ycor + r * sin(self.heading * DEGTOR)
- x = self.width / 2 + int(cx - r)
- y = self.height / 2 - int(cy + r)
+ x, y = self.turtle_to_screen_coordinates(int(cx - r), int(cy + r))
w = int(2 * r)
h = w
if self.pendown:
@@ -324,9 +345,9 @@ class TurtleGraphics:
self.ycor = cy - r * sin(self.heading * DEGTOR)
if self.tw.saving_svg and self.pendown:
self.tw.svg_string += self.svg.new_path(oldx,
- self.height / 2 - oldy)
+ self.invert_y_coordinate(oldy))
self.tw.svg_string += self.svg.arc_to(self.xcor,
- self.height / 2 - self.ycor,
+ self.invert_y_coordinate(self.ycor),
r, a, 0, s)
self.tw.svg_string += "\"\n"
self.tw.svg_string += self.svg.style()
@@ -347,8 +368,10 @@ class TurtleGraphics:
self.draw_line(oldx, oldy, self.xcor, self.ycor)
self.move_turtle()
- event = "x|%s" % (data_to_string([self._get_my_nick(), [round_int(x), round_int(y)]]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "x|%s" % (data_to_string([self._get_my_nick(),
+ [round_int(x), round_int(y)]]))
+ self.tw.send_event(event)
def setpensize(self, ps, share=True):
""" Set the pen size """
@@ -361,10 +384,12 @@ class TurtleGraphics:
return
self.tw.active_turtle.set_pen_size(ps)
self.gc.set_line_attributes(int(self.pensize * self.tw.coord_scale),
- gtk.gdk.LINE_SOLID, gtk.gdk.CAP_ROUND, gtk.gdk.JOIN_MITER)
+ gtk.gdk.LINE_SOLID, gtk.gdk.CAP_ROUND, gtk.gdk.JOIN_MITER)
self.svg.set_stroke_width(self.pensize)
- event = "w|%s" % (data_to_string([self._get_my_nick(), round_int(ps)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "w|%s" % (data_to_string([self._get_my_nick(),
+ round_int(ps)]))
+ self.tw.send_event(event)
def setcolor(self, c, share=True):
""" Set the pen color """
@@ -377,8 +402,10 @@ class TurtleGraphics:
self.tw.active_turtle.set_color(c)
self.set_fgcolor()
self.set_textcolor()
- event = "c|%s" % (data_to_string([self._get_my_nick(), round_int(c)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "c|%s" % (data_to_string([self._get_my_nick(),
+ round_int(c)]))
+ self.tw.send_event(event)
def setgray(self, g, share=True):
""" Set the gray level """
@@ -394,10 +421,12 @@ class TurtleGraphics:
self.set_fgcolor()
self.set_textcolor()
self.tw.active_turtle.set_gray(self.gray)
- event = "g|%s" % (data_to_string([self._get_my_nick(), round_int(self.gray)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "g|%s" % (data_to_string([self._get_my_nick(),
+ round_int(self.gray)]))
+ self.tw.send_event(event)
- def settextcolor(self, c):
+ def settextcolor(self, c): # depreciated
""" Set the text color """
try:
self.tcolor = c
@@ -423,8 +452,10 @@ class TurtleGraphics:
self.tw.active_turtle.set_shade(s)
self.set_fgcolor()
self.set_textcolor()
- event = "s|%s" % (data_to_string([self._get_my_nick(), round_int(s)]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "s|%s" % (data_to_string([self._get_my_nick(),
+ round_int(s)]))
+ self.tw.send_event(event)
def fillscreen(self, c, s):
""" Fill screen with color/shade and reset to defaults """
@@ -479,10 +510,11 @@ class TurtleGraphics:
""" Lower or raise the pen """
self.pendown = bool
self.tw.active_turtle.set_pen_state(bool)
- event = "p|%s" % (data_to_string([self._get_my_nick(), bool]))
- self._send_event(event, share)
+ if self.tw.sharing() and share:
+ event = "p|%s" % (data_to_string([self._get_my_nick(), bool]))
+ self.tw.send_event(event)
- def draw_pixbuf(self, pixbuf, a, b, x, y, w, h, path):
+ def draw_pixbuf(self, pixbuf, a, b, x, y, w, h, path, share=True):
""" Draw a pixbuf """
w *= self.tw.coord_scale
h *= self.tw.coord_scale
@@ -492,12 +524,31 @@ class TurtleGraphics:
if self.tw.running_sugar:
# In Sugar, we need to embed the images inside the SVG
self.tw.svg_string += self.svg.image(x - self.width / 2,
- y, w, h, path, image_to_base64(pixbuf, self.tw.activity))
+ y, w, h, path, image_to_base64(pixbuf,
+ get_path(self.tw.activity, 'instance')))
else:
+ # Outside of Sugar, we save a path
self.tw.svg_string += self.svg.image(x - self.width / 2,
y, w, h, path)
-
- def draw_text(self, label, x, y, size, w):
+ if self.tw.sharing() and share:
+ if self.tw.running_sugar:
+ tmp_path = get_path(self.tw.activity, 'instance')
+ else:
+ tmp_path = '/tmp'
+ data = image_to_base64(pixbuf, tmp_path)
+ height = pixbuf.get_height()
+ width = pixbuf.get_width()
+ x, y = self.screen_to_turtle_coordinates(x, y)
+ event = "P|%s" % (data_to_string([self._get_my_nick(),
+ [round_int(a), round_int(b),
+ round_int(x), round_int(y),
+ round_int(w), round_int(h),
+ round_int(width),
+ round_int(height),
+ data]]))
+ self.tw.send_event(event)
+
+ def draw_text(self, label, x, y, size, w, share=True):
""" Draw text """
w *= self.tw.coord_scale
self.gc.set_foreground(self.tw.textcolor)
@@ -532,27 +583,47 @@ class TurtleGraphics:
if self.tw.saving_svg and self.pendown:
self.tw.svg_string += self.svg.text(x - self.width / 2,
y + size, size, w, label)
+ if self.tw.sharing() and share:
+ event = "W|%s" % (data_to_string([self._get_my_nick(),
+ [label, round_int(x),
+ round_int(y), round_int(size),
+ round_int(w)]]))
+ self.tw.send_event(event)
+
+ def turtle_to_screen_coordinates(self, x, y):
+ """ The origin of turtle coordinates is the center of the screen """
+ return self.width / 2 + x, self.invert_y_coordinate(y)
+
+ def screen_to_turtle_coordinates(self, x, y):
+ """ The origin of the screen coordinates is the upper left corner """
+ return x - self.width / 2, self.invert_y_coordinate(y)
+
+ def invert_y_coordinate(self, y):
+ """ Positive y goes up in turtle coordinates, down in sceeen
+ coordinates """
+ return self.height / 2 - y
def draw_line(self, x1, y1, x2, y2):
""" Draw a line """
- x1, y1 = self.width / 2 + int(x1), self.height / 2 - int(y1)
- x2, y2 = self.width / 2 + int(x2), self.height / 2 - int(y2)
+ x1, y1 = self.turtle_to_screen_coordinates(x1, y1)
+ x2, y2 = self.turtle_to_screen_coordinates(x2, y2)
if x1 < x2:
- minx, maxx = x1, x2
+ minx, maxx = int(x1), int(x2)
else:
- minx, maxx = x2, x1
+ minx, maxx = int(x2), int(x1)
if y1 < y2:
- miny, maxy = y1, y2
+ miny, maxy = int(y1), int(y2)
else:
- miny, maxy = y2, y1
+ miny, maxy = int(y2), int(y1)
w, h = maxx - minx, maxy - miny
- self.canvas.images[0].draw_line(self.gc, x1, y1, x2, y2)
+ self.canvas.images[0].draw_line(self.gc, int(x1), int(y1), int(x2),
+ int(y2))
if self.fill and self.poly_points == []:
- self.poly_points.append((x1, y1))
+ self.poly_points.append((int(x1), int(y1)))
if self.fill:
- self.poly_points.append((x2, y2))
- self.invalt(minx - self.pensize * self.tw.coord_scale / 2 - 3,
- miny - self.pensize * self.tw.coord_scale / 2 - 3,
+ self.poly_points.append((int(x2), int(y2)))
+ self.invalt(minx - int(self.pensize * self.tw.coord_scale / 2) - 3,
+ miny - int(self.pensize * self.tw.coord_scale / 2) - 3,
w + self.pensize * self.tw.coord_scale + 6,
h + self.pensize * self.tw.coord_scale + 6)
@@ -562,8 +633,7 @@ class TurtleGraphics:
def move_turtle(self):
""" Move the turtle """
- x, y = self.width / 2 + int(self.xcor), \
- self.height / 2 - int(self.ycor)
+ x, y = self.turtle_to_screen_coordinates(self.xcor, self.ycor)
self.tw.active_turtle.move(
(int(self.cx + x - self.tw.active_turtle.spr.rect.width / 2),
int(self.cy + y - self.tw.active_turtle.spr.rect.height / 2)))
@@ -611,9 +681,9 @@ class TurtleGraphics:
def get_pixel(self):
""" Read the pixel at x, y """
if self.tw.interactive_mode:
- return self.canvas.get_pixel(
- (self.width / 2 + int(self.xcor),
- self.height / 2 - int(self.ycor)), 0, self.tw.color_mode)
+ x, y = self.turtle_to_screen_coordinates(self.xcor, self.ycor)
+ return self.canvas.get_pixel((int(x), int(y)), 0,
+ self.tw.color_mode)
else:
return(-1, -1, -1, -1)
@@ -628,10 +698,9 @@ class TurtleGraphics:
self.tw.active_turtle = self.tw.turtles.get_turtle(k, False)
self.tw.active_turtle.show()
tx, ty = self.tw.active_turtle.get_xy()
- self.xcor = -self.width / 2 + tx + \
- self.tw.active_turtle.spr.rect.width / 2
- self.ycor = self.height / 2 - ty - \
- self.tw.active_turtle.spr.rect.height / 2
+ self.xcor, self.ycor = self.screen_to_turtle_coordinates(tx, ty)
+ self.xcor += self.tw.active_turtle.spr.rect.width / 2
+ self.ycor -= self.tw.active_turtle.spr.rect.height / 2
self.heading = self.tw.active_turtle.get_heading()
self.setcolor(self.tw.active_turtle.get_color(), False)
self.setgray(self.tw.active_turtle.get_gray(), False)
@@ -651,11 +720,3 @@ class TurtleGraphics:
def _get_my_nick(self):
return self.tw.nick
-
- def _send_event(self, entry, share):
- if not share:
- return
-
- if self.tw.sharing():
- print "Sending: %s" % entry
- self.tw.send_event(entry)
diff --git a/TurtleArt/tacollaboration.py b/TurtleArt/tacollaboration.py
index 52164e0..d4b7529 100644
--- a/TurtleArt/tacollaboration.py
+++ b/TurtleArt/tacollaboration.py
@@ -1,9 +1,35 @@
+#Copyright (c) 2011, Walter Bender
+#Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
+
+#Permission is hereby granted, free of charge, to any person obtaining a copy
+#of this software and associated documentation files (the "Software"), to deal
+#in the Software without restriction, including without limitation the rights
+#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+#copies of the Software, and to permit persons to whom the Software is
+#furnished to do so, subject to the following conditions:
+
+#The above copyright notice and this permission notice shall be included in
+#all copies or substantial portions of the Software.
+
+#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+#THE SOFTWARE.
from dbus.service import signal
from dbus.gobject_service import ExportedGObject
import logging
import telepathy
-from TurtleArt.tautils import data_to_string, data_from_string
+
+import gtk
+import base64
+
+from TurtleArt.tautils import data_to_string, data_from_string, get_path, \
+ base64_to_image
+from TurtleArt.taconstants import DEFAULT_TURTLE_COLORS
try:
from sugar import profile
@@ -19,17 +45,19 @@ IFACE = SERVICE
PATH = '/org/laptop/TurtleArtActivity'
_logger = logging.getLogger('turtleart-activity')
+
class Collaboration():
def __init__(self, tw, activity):
""" A simplistic sharing model: the sharer is the master """
self._tw = tw
self._tw.send_event = self.send_event
self._activity = activity
+ self._setup_dispatch_table()
def setup(self):
# TODO: hand off role of master is sharer leaves
self.pservice = presenceservice.get_instance()
- self.initiating = None # sharing (True) or joining (False)
+ self.initiating = None # sharing (True) or joining (False)
# Add my buddy object to the list
owner = self.pservice.get_owner()
@@ -40,6 +68,24 @@ class Collaboration():
self._activity.connect('shared', self._shared_cb)
self._activity.connect('joined', self._joined_cb)
+ def _setup_dispatch_table(self):
+ self._processing_methods = {
+ 't': self._turtle_request,
+ 'T': self._receive_turtle_dict,
+ 'f': self._move_forward,
+ 'a': self._move_in_arc,
+ 'r': self._rotate_turtle,
+ 'x': self._setxy,
+ 'W': self._draw_text,
+ 'c': self._set_pen_color,
+ 'g': self._set_pen_gray_level,
+ 's': self._set_pen_shade,
+ 'w': self._set_pen_width,
+ 'p': self._set_pen_state,
+ 'F': self._fill_polygon,
+ 'P': self._draw_pixbuf
+ }
+
def _shared_cb(self, activity):
self._shared_activity = self._activity._shared_activity
if self._shared_activity is None:
@@ -128,98 +174,25 @@ class Collaboration():
_logger.debug(event)
self.send_event(event)
- def event_received_cb(self, text):
+ def event_received_cb(self, event_message):
"""
Events are sent as a tuple, nick|cmd, where nick is a turle name
and cmd is a turtle event. Everyone gets the turtle dictionary from
the sharer and watches for 't' events, which indicate that a new
turtle has joined.
"""
- if len(text) == 0:
+ if len(event_message) == 0:
return
- # Save active Turtle
+
+ # Save active Turtle
save_active_turtle = self._tw.active_turtle
- e = text.split("|", 2)
- text = e[1]
- if e[0] == 't': # request for turtle dictionary
- if text > 0:
- [nick, colors] = data_from_string(text)
- if nick != self._tw.nick:
- # There may not be a turtle dictionary.
- if hasattr(self, "turtle_dictionary"):
- self.turtle_dictionary[nick] = colors
- else:
- self.turtle_dictionary = {nick: colors}
- # Add new turtle for the joiner.
- self._tw.canvas.set_turtle(nick, colors)
- # Sharer should send turtle dictionary.
- if self.initiating:
- text = data_to_string(self.turtle_dictionary)
- self.send_event("T|" + text)
- elif e[0] == 'T': # Receiving the turtle dictionary.
- if self.waiting_for_turtles:
- if len(text) > 0:
- self.turtle_dictionary = data_from_string(text)
- for nick in self.turtle_dictionary:
- if nick != self._tw.nick:
- colors = self.turtle_dictionary[nick]
- # add new turtle for the joiner
- self._tw.canvas.set_turtle(nick, colors)
- self.waiting_for_turtles = False
- elif e[0] == 'f': # move a turtle forward
- if len(text) > 0:
- [nick, x] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.forward(x, False)
- elif e[0] == 'a': # move a turtle in an arc
- if len(text) > 0:
- [nick, [a, r]] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.arc(a, r, False)
- elif e[0] == 'r': # rotate turtle
- if len(text) > 0:
- [nick, h] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.seth(h, False)
- elif e[0] == 'x': # set turtle xy position
- if len(text) > 0:
- [nick, [x, y]] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.setxy(x, y, False)
- elif e[0] == 'c': # set turtle pen color
- if len(text) > 0:
- [nick, x] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.setcolor(x, False)
- elif e[0] == 'g': # set turtle pen gray level
- if len(text) > 0:
- [nick, x] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.setgray(x, False)
- elif e[0] == 's': # set turtle pen shade
- if len(text) > 0:
- [nick, x] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.setshade(x, False)
- elif e[0] == 'w': # set turtle pen width
- if len(text) > 0:
- [nick, x] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.setpensize(x, False)
- elif e[0] == 'p': # set turtle pen state
- if len(text) > 0:
- [nick, x] = data_from_string(text)
- if nick != self._tw.nick:
- self._tw.canvas.set_turtle(nick)
- self._tw.canvas.setpen(x, False)
+
+ try:
+ command, payload = event_message.split("|", 2)
+ self._processing_methods[command](payload)
+ except ValueError:
+ _logger.debug("could not split event message")
+
# Restore active Turtle
self._tw.canvas.set_turtle(self._tw.turtles.get_turtle_key(
save_active_turtle))
@@ -229,27 +202,156 @@ class Collaboration():
if hasattr(self, 'chattube') and self.chattube is not None:
self.chattube.SendText(entry)
+ def _turtle_request(self, payload):
+ if payload > 0:
+ [nick, colors] = data_from_string(payload)
+ if nick != self._tw.nick:
+ # There may not be a turtle dictionary.
+ if hasattr(self, "turtle_dictionary"):
+ self.turtle_dictionary[nick] = colors
+ else:
+ self.turtle_dictionary = {nick: colors}
+ # Add new turtle for the joiner.
+ self._tw.canvas.set_turtle(nick, colors)
+ # Sharer should send turtle dictionary.
+ if self.initiating:
+ event_payload = data_to_string(self.turtle_dictionary)
+ self.send_event("T|" + event_payload)
+
+ def _receive_turtle_dict(self, payload):
+ if self.waiting_for_turtles:
+ if len(payload) > 0:
+ self.turtle_dictionary = data_from_string(payload)
+ for nick in self.turtle_dictionary:
+ if nick != self._tw.nick:
+ colors = self.turtle_dictionary[nick]
+ # add new turtle for the joiner
+ self._tw.canvas.set_turtle(nick, colors)
+ self.waiting_for_turtles = False
+
+ def _draw_pixbuf(self, payload):
+ if len(payload) > 0:
+ [nick, [a, b, x, y, w, h, width, height, data]] =\
+ data_from_string(payload)
+ if nick != self._tw.nick:
+ if self._tw.running_sugar:
+ tmp_path = get_path(self._tw.activity, 'instance')
+ else:
+ tmp_path = '/tmp'
+ file_name = base64_to_image(data, tmp_path)
+ pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(file_name,
+ width, height)
+ x, y = self._tw.canvas.turtle_to_screen_coordinates(x, y)
+ self._tw.canvas.draw_pixbuf(pixbuf, a, b, x, y, w, h,
+ file_name, False)
+
+ def _move_forward(self, payload):
+ if len(payload) > 0:
+ [nick, x] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.forward(x, False)
+
+ def _move_in_arc(self, payload):
+ if len(payload) > 0:
+ [nick, [a, r]] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.arc(a, r, False)
+
+ def _rotate_turtle(self, payload):
+ if len(payload) > 0:
+ [nick, h] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.seth(h, False)
+
+ def _setxy(self, payload):
+ if len(payload) > 0:
+ [nick, [x, y]] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.setxy(x, y, False)
+
+ def _draw_text(self, payload):
+ if len(payload) > 0:
+ [nick, [label, x, y, size, w]] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.draw_text(label, x, y, size, w, False)
+
+ def _set_pen_color(self, payload):
+ if len(payload) > 0:
+ [nick, x] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.setcolor(x, False)
+
+ def _set_pen_gray_level(self, payload):
+ if len(payload) > 0:
+ [nick, x] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.setgray(x, False)
+
+ def _set_pen_shade(self, payload):
+ if len(payload) > 0:
+ [nick, x] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.setshade(x, False)
+
+ def _set_pen_width(self, payload):
+ if len(payload) > 0:
+ [nick, x] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.setpensize(x, False)
+
+ def _set_pen_state(self, payload):
+ if len(payload) > 0:
+ [nick, x] = data_from_string(payload)
+ if nick != self._tw.nick:
+ self._tw.canvas.set_turtle(nick)
+ self._tw.canvas.setpen(x, False)
+
+ def _fill_polygon(self, payload):
+ # Check to make sure that the poly_point array is passed properly
+ if len(payload) > 0:
+ [nick, poly_points] = data_from_string(payload)
+ shared_poly_points = []
+ for i in range(len(poly_points)):
+ shared_poly_points.append((
+ self._tw.canvas.turtle_to_screen_coordinates(
+ poly_points[i][0], poly_points[i][1])))
+ self._tw.canvas.fill_polygon(shared_poly_points)
+
def _get_dictionary(self):
- d = { self._get_nick(): self._get_colors()}
+ d = {self._get_nick(): self._get_colors()}
return d
def _get_nick(self):
return self._tw.nick
def _get_colors(self):
- if profile:
- colors = profile.get_color().to_string()
+ colors = None
+ if self._tw.running_sugar:
+ if profile.get_color() is not None:
+ colors = profile.get_color().to_string()
else:
colors = self._activity.get_colors()
+ if colors is None:
+ colors = '%s,%s' % (DEFAULT_TURTLE_COLORS[0],
+ DEFAULT_TURTLE_COLORS[1])
return colors
+
class ChatTube(ExportedGObject):
def __init__(self, tube, is_initiator, stack_received_cb):
"""Class for setting up tube for sharing."""
super(ChatTube, self).__init__(tube, PATH)
self.tube = tube
- self.is_initiator = is_initiator # Are we sharing or joining activity?
+ self.is_initiator = is_initiator # Are we sharing or joining activity?
self.stack_received_cb = stack_received_cb
self.stack = ''
diff --git a/TurtleArt/taconstants.py b/TurtleArt/taconstants.py
index 77ebefb..f5d97ad 100644
--- a/TurtleArt/taconstants.py
+++ b/TurtleArt/taconstants.py
@@ -113,35 +113,33 @@ TOP_LAYER = 1000
#
PALETTE_NAMES = ['turtle', 'pen', 'colors', 'numbers', 'flow', 'blocks',
- 'extras', 'sensor', 'portfolio', 'trash']
+ 'extras', 'sensor', 'media', 'portfolio', 'trash']
-PALETTES = [['clean', 'forward', 'back', 'show', 'left', 'right',
- 'seth', 'setxy2', 'heading', 'xcor', 'ycor', 'setscale',
- 'arc', 'scale', 'leftpos', 'toppos', 'rightpos',
- 'bottompos'],
+PALETTES = [['forward', 'back', 'clean', 'left', 'right',
+ 'arc', 'setxy2', 'seth', 'xcor', 'ycor', 'heading'],
['penup', 'pendown', 'setpensize', 'fillscreen', 'pensize',
- 'setcolor', 'setshade', 'setgray', 'color', 'shade',
- 'gray', 'startfill', 'stopfill'],
- ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple',
+ 'startfill', 'stopfill'],
+ ['setcolor', 'setshade', 'setgray', 'color', 'shade', 'gray',
+ 'red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple',
'white', 'black'],
['plus2', 'minus2', 'product2',
'division2', 'identity2', 'remainder2', 'sqrt', 'random',
'number', 'greater2', 'less2', 'equal2', 'not', 'and2', 'or2'],
['wait', 'forever', 'repeat', 'if', 'ifelse', 'while', 'until',
'hspace', 'vspace', 'stopstack'],
- ['start', 'hat1', 'stack1', 'hat', 'hat2', 'stack2', 'stack',
- 'storeinbox1', 'storeinbox2', 'string', 'box1', 'box2', 'box',
- 'storein'],
+ ['start', 'storeinbox1', 'storeinbox2', 'string', 'box1', 'box2',
+ 'box', 'storein', 'hat', 'hat1', 'hat2', 'stack', 'stack1',
+ 'stack2'],
['push', 'printheap', 'clearheap', 'pop', 'comment', 'print',
- 'myfunc1arg', 'userdefined',
- 'cartesian', 'width', 'height', 'polar', 'addturtle', 'reskin',
- 'sandwichtop_no_label', 'sandwichbottom'],
- ['kbinput', 'keyboard', 'readpixel', 'see',
- 'sound', 'volume', 'pitch'],
- ['journal', 'audio', 'video', 'description', 'hideblocks',
- 'showblocks', 'fullscreen', 'savepix', 'savesvg', 'mediawait',
- 'picturelist', 'picture1x1a', 'picture1x1', 'picture2x2',
- 'picture2x1', 'picture1x2'],
+ 'myfunc1arg', 'userdefined', 'cartesian', 'polar', 'addturtle',
+ 'reskin', 'sandwichtop_no_label', 'sandwichbottom'],
+ ['kbinput', 'keyboard', 'readpixel', 'see', 'time'],
+ ['journal', 'audio', 'video', 'description', 'string',
+ 'show', 'setscale', 'savepix', 'savesvg', 'scale', 'mediawait'],
+ ['hideblocks', 'showblocks', 'fullscreen', 'picturelist',
+ 'picture1x1a', 'picture1x1', 'picture2x2', 'picture2x1',
+ 'picture1x2', 'leftpos', 'bottompos', 'width', 'rightpos',
+ 'toppos', 'height'],
['empty', 'restoreall']]
#
@@ -152,7 +150,8 @@ COLORS = [["#00FF00", "#00A000"], ["#00FFFF", "#00A0A0"],
["#00FFFF", "#00A0A0"], ["#FF00FF", "#A000A0"],
["#FFC000", "#A08000"], ["#FFFF00", "#A0A000"],
["#FF0000", "#A00000"], ["#FF0000", "#A00000"],
- ["#0000FF", "#0000A0"], ["#FFFF00", "#A0A000"]]
+ ["#A0FF00", "#A0A000"], ["#0000FF", "#0000A0"],
+ ["#FFFF00", "#A0A000"]]
BOX_COLORS = {'red': ["#FF0000", "#A00000"],
'orange': ["#FFD000", "#AA8000"],
@@ -176,6 +175,7 @@ STANDARD_STROKE_WIDTH = 1.0
BLOCK_SCALE = 2.0
PALETTE_SCALE = 1.5
DEFAULT_TURTLE = 'Yertle'
+DEFAULT_TURTLE_COLORS = ['#008000', '#00A000']
HORIZONTAL_PALETTE = 0
VERTICAL_PALETTE = 1
BLACK = -9999
@@ -190,10 +190,6 @@ DEFAULT_SCALE = 33
XO1 = 'xo1'
XO15 = 'xo1.5'
UNKNOWN = 'unknown'
-SENSOR_AC_NO_BIAS = 'external'
-SENSOR_AC_BIAS = 'sound'
-SENSOR_DC_NO_BIAS = 'voltage'
-SENSOR_DC_BIAS = 'resistance'
#
# Block-style definitions
@@ -205,15 +201,15 @@ BASIC_STYLE = []
BASIC_STYLE_EXTENDED_VERTICAL = ['clean', 'penup', 'pendown', 'stack1',
'stack2', 'hideblocks', 'showblocks', 'clearheap', 'printheap', 'kbinput',
'fullscreen', 'cartesian', 'polar', 'startfill', 'mediawait',
- 'stopfill', 'readpixel', 'readcamera', 'vspace']
+ 'stopfill', 'readpixel', 'vspace']
INVISIBLE = ['sandwichcollapsed']
BASIC_STYLE_EXTENDED = ['picturelist', 'picture1x1', 'picture2x2',
'picture2x1', 'picture1x2', 'picture1x1a']
-BASIC_STYLE_1ARG = ['forward', 'back', 'left', 'right', 'seth', 'show', 'image',
+BASIC_STYLE_1ARG = ['forward', 'back', 'left', 'right', 'seth', 'show',
'setscale', 'setpensize', 'setcolor', 'setshade', 'print', 'showaligned',
'settextsize', 'settextcolor', 'print', 'wait', 'storeinbox1', 'savepix',
'storeinbox2', 'wait', 'stack', 'push', 'nop', 'addturtle', 'comment',
- 'savesvg', 'setgray', 'skin', 'reskin']
+ 'image', 'savesvg', 'setgray', 'skin', 'reskin']
BASIC_STYLE_VAR_ARG = ['userdefined', 'userdefined2args', 'userdefined3args']
BULLET_STYLE = ['templatelist', 'list']
BASIC_STYLE_2ARG = ['arc', 'setxy', 'setxy2', 'fillscreen', 'storein', 'write']
@@ -222,9 +218,8 @@ BOX_STYLE = ['number', 'xcor', 'ycor', 'heading', 'pensize', 'color', 'shade',
'toppos', 'rightpos', 'bottompos', 'width', 'height', 'pop', 'keyboard',
'red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple', 'white',
'black', 'titlex', 'titley', 'leftx', 'topy', 'rightx', 'bottomy',
- 'sound', 'volume', 'pitch', 'voltage', 'resistance', 'gray', 'see', 'rfid',
- 'luminance']
-BOX_STYLE_MEDIA = ['description', 'audio', 'journal', 'video', 'camera']
+ 'gray', 'see', 'time']
+BOX_STYLE_MEDIA = ['description', 'audio', 'journal', 'video']
NUMBER_STYLE = ['plus2', 'product2', 'myfunc']
NUMBER_STYLE_VAR_ARG = ['myfunc1arg', 'myfunc2arg', 'myfunc3arg']
NUMBER_STYLE_BLOCK = ['random']
@@ -278,7 +273,7 @@ OLD_DOCK = ['and', 'or', 'plus', 'minus', 'division', 'product', 'remainder']
# Blocks that contain media
#
CONTENT_BLOCKS = ['number', 'string', 'description', 'audio', 'video',
- 'journal', 'camera']
+ 'journal']
#
# These blocks get a special skin
@@ -291,11 +286,11 @@ PYTHON_SKIN = ['nop', 'userdefined', 'userdefined2args', 'userdefined3args']
#
# These blocks hold constants
#
-CONSTANTS = {'leftpos':None, 'toppos':None, 'rightpos':None, 'bottompos':None,
- 'width':None, 'height':None, 'red':0, 'orange':10, 'yellow':20,
- 'green':40, 'cyan':50, 'blue':70, 'purple':90, 'titlex':None,
- 'titley':None, 'leftx':None, 'topy':None, 'rightx':None,
- 'bottomy':None}
+CONSTANTS = {'leftpos': None, 'toppos': None, 'rightpos': None,
+ 'bottompos': None, 'width': None, 'height': None, 'red': 0,
+ 'orange': 10, 'yellow': 20, 'green': 40, 'cyan': 50, 'blue': 70,
+ 'purple': 90, 'titlex': None, 'titley': None, 'leftx': None,
+ 'topy': None, 'rightx': None, 'bottomy': None}
#
# Block-name dictionary used for labels
@@ -314,7 +309,6 @@ BLOCK_NAMES = {
'box': [_('box')],
'box1': [_('box 1')],
'box2': [_('box 2')],
- 'camera': [' '],
'cartesian': [_('Cartesian')],
'clean': [_(' clean ')],
'clearheap': [_('empty heap')],
@@ -354,7 +348,6 @@ BLOCK_NAMES = {
'leftx': [_('picture left')],
'less2': ['<'],
'list': ['list'],
- 'luminance': [_('brightness')],
'mediawait': [_('media wait')],
'minus2': ['–'],
'myfunc': [_('Python'), 'f(x)', 'x'],
@@ -388,7 +381,6 @@ BLOCK_NAMES = {
'purple': [_('purple')],
'push': [_('push')],
'random': [_('random'), _('min'), _('max')],
- 'readcamera': [_('read camera')],
'readpixel': [_('read pixel')],
'red': [_('red')],
'remainder2': [_('mod')],
@@ -397,7 +389,6 @@ BLOCK_NAMES = {
'resistance': [_('resistance')],
'restore': [_('restore last')],
'restoreall': [_('restore all')],
- 'rfid': [_('RFID')],
'right': [_('right')],
'rightpos': [_('right')],
'rightx': [_('picture right')],
@@ -447,6 +438,7 @@ BLOCK_NAMES = {
'template2x2': [' '],
'templatelist': [' '],
'textsize': [_('text size')],
+ 'time': [_('time')],
'titlex': [_('title x')],
'titley': [_('title y')],
'toppos': [_('top')],
@@ -520,7 +512,6 @@ PRIMITIVES = {
'leftx': 'leftx',
'less2': 'less?',
'list': 'bulletlist',
- 'luminance': 'luminance',
'mediawait': 'mediawait',
'minus2': 'minus',
'myfunc': 'myfunction',
@@ -534,7 +525,6 @@ PRIMITIVES = {
'pendown': 'pendown',
'pensize': 'pensize',
'penup': 'penup',
- 'pitch': 'pitch',
'plus2': 'plus',
'polar': 'polar',
'pop': 'pop',
@@ -545,12 +535,9 @@ PRIMITIVES = {
'push': 'push',
'random': 'random',
'red': 'red',
- 'readcamera': 'readcamera',
'readpixel': 'readpixel',
'remainder2': 'mod',
'repeat': 'repeat',
- 'resistance': 'resistance',
- 'rfid': 'rfid',
'right': 'right',
'rightpos': 'rpos',
'rightx': 'rightx',
@@ -579,7 +566,6 @@ PRIMITIVES = {
'showblocks': 'showblocks',
'showaligned': 'showaligned',
'skin': 'skin',
- 'sound': 'sound',
'sqrt': 'sqrt',
'stack': 'stack',
'stack1': 'stack1',
@@ -598,6 +584,7 @@ PRIMITIVES = {
'template2x2': 't2x2',
'templatelist': 'bullet',
'textsize': 'textsize',
+ 'time': 'time',
'titlex': 'titlex',
'titley': 'titley',
'toppos': 'tpos',
@@ -605,8 +592,6 @@ PRIMITIVES = {
'userdefined': 'userdefined',
'userdefined2args': 'userdefined2',
'userdefined3args': 'userdefined3',
- 'voltage': 'voltage',
- 'volume': 'volume',
'vspace': 'nop',
'wait': 'wait',
'while2': 'while',
@@ -627,7 +612,6 @@ DEFAULTS = {
'audio': [None],
'back': [100],
'box': [_('my box')],
- 'camera': ['CAMERA'],
'comment': [_('comment')],
'description': [None],
'fillscreen': [60, 80],
@@ -697,6 +681,9 @@ STRING_OR_NUMBER_ARGS = ['plus2', 'equal2', 'less2', 'greater2', 'box',
CONTENT_ARGS = ['show', 'showaligned', 'push', 'storein', 'storeinbox1',
'storeinbox2']
+PREFIX_DICTIONARY = {'journal': '#smedia_', 'description': '#sdescr_',
+ 'audio': '#saudio_', 'video': '#svideo_'}
+
#
# Status blocks
#
@@ -758,8 +745,9 @@ TEMPLATES = {'t1x1': (0.5, 0.5, 0.0625, 0.125, 1.05, 0),
# Names for blocks without names for popup help
#
SPECIAL_NAMES = {
+ 'and2': _('and'),
'audio': _('audio'),
- 'camera': _('camera'),
+ 'description': _('description'),
'division2': _('divide'),
'equal2': _('equal'),
'greater2': _('greater than'),
@@ -769,11 +757,16 @@ SPECIAL_NAMES = {
'ifelse': _('if then else'),
'journal': _('journal'),
'less2': _('less than'),
+ 'or2': _('or'),
'minus2': _('minus'),
'nop': _('Python code'),
'number': _('number'),
'plus2': _('plus'),
'product2': _('multiply'),
+ 'repeat': _('repeat'),
+ 'sandwichtop_no_label': _('top of a collapsible stack'),
+ 'sandwichbottom': _('bottom of a collapsible stack'),
+ 'sensors': _('sensors'),
'sqrt': _('square root'),
'template1x1': _('presentation 1x1'),
'template1x1a': _('presentation 1x1'),
@@ -782,6 +775,9 @@ SPECIAL_NAMES = {
'template2x2': _('presentation 2x2'),
'templatelist': _('presentation bulleted list'),
'textsize': _('text size'),
+ 'userdefined': _('Python block'),
+ 'userdefined2args': _('Python block'),
+ 'userdefined3args': _('Python block'),
'video': _('video'),
'vspace': _('vertical space')}
@@ -799,11 +795,11 @@ HELP_STRINGS = {
'box1': _("Variable 1 (numeric value)"),
'box2': _("Variable 2 (numeric value)"),
'box': _("named variable (numeric value)"),
- 'camera': _('camera output'),
'cartesian': _("displays Cartesian coordinates"),
'clean': _("clears the screen and reset the turtle"),
'clearheap': _("emptys FILO (first-in-last-out heap)"),
- 'color': _("holds current pen color (can be used in place of a number block)"),
+ 'color': _(
+ "holds current pen color (can be used in place of a number block)"),
'colors': _("Palette of pen colors"),
'comment': _("places a comment in your code"),
'debugoff': _("Debug"),
@@ -818,7 +814,8 @@ HELP_STRINGS = {
'forever': _("loops forever"),
'forward': _("moves turtle forward"),
'fullscreen': _("hides the Sugar toolbars"),
- 'gray': _("holds current gray level (can be used in place of a number block)"),
+ 'gray': _(
+ "holds current gray level (can be used in place of a number block)"),
'greater2': _("logical greater-than operator"),
'hat1': _("top of Action 1 stack"),
'hat2': _("top of Action 2 stack"),
@@ -830,14 +827,15 @@ HELP_STRINGS = {
'hspace': _("jogs stack right"),
'identity2': _("identity operator used for extending blocks"),
'ifelse': _("if-then-else operator that uses boolean operators from Numbers palette"),
- 'if': _("if-then operator that uses boolean operators from Numbers palette"),
+ 'if': _(
+ "if-then operator that uses boolean operators from Numbers palette"),
'journal': _("Sugar Journal media object"),
'kbinput': _("query for keyboard input (results stored in keyboard block)"),
'keyboard': _("holds results of query-keyboard block"),
'leftpos': _("xcor of left of screen"),
'left': _("turns turtle counterclockwise (angle in degrees)"),
'less2': _("logical less-than operator"),
- 'luminance': _("light level detected by camera"),
+ 'media': _("Palette of media objects"),
'mediawait': _("wait for current video or audio to complete"),
'minus2': _("subtracts bottom numeric input from top numeric input"),
'myfunc': _("a programmable block: used to add advanced math equations, e.g., sin(x)"),
@@ -849,19 +847,21 @@ HELP_STRINGS = {
'not': _("logical NOT operator"),
'numbers': _("Palette of numeric operators"),
'number': _("used as numeric input in mathematic operators"),
- 'or': _("logical OR operator"),
+ 'or2': _("logical OR operator"),
'orientation': _("changes the orientation of the palette of blocks"),
'pendown': _("Turtle will draw when moved."),
'pen': _("Palette of pen commands"),
- 'pensize': _("holds current pen size (can be used in place of a number block)"),
+ 'pensize': _(
+ "holds current pen size (can be used in place of a number block)"),
'penup': _("Turtle will not draw when moved."),
- 'picture1x1': _("presentation template: select Journal object (with description)"),
- 'picture1x1a': _("presentation template: select Journal object (no description)"),
+ 'picture1x1': _(
+ "presentation template: select Journal object (with description)"),
+ 'picture1x1a': _(
+ "presentation template: select Journal object (no description)"),
'picture1x2': _("presentation template: select two Journal objects"),
'picture2x1': _("presentation template: select two Journal objects"),
'picture2x2': _("presentation template: select four Journal objects"),
'picturelist': _("presentation template: list of bullets"),
- 'pitch': _('microphone input pitch'),
'plus2': _("adds two alphanumeric inputs"),
'polar': _("displays polar coordinates"),
'pop': _("pops value off FILO (first-in last-out heap)"),
@@ -871,7 +871,6 @@ HELP_STRINGS = {
'product2': _("multiplies two numeric inputs"),
'push': _("pushes value onto FILO (first-in last-out heap)"),
'random': _("returns random number between minimum (top) and maximum (bottom) values"),
- 'readcamera': _("Average RGB color from camera is pushed to the stack"),
'readpixel': _("RGB color under the turtle is pushed to the stack"),
'remainder2': _("modular (remainder) operator"),
'repeat': _("loops specified number of times"),
@@ -879,12 +878,12 @@ HELP_STRINGS = {
'reskin': _("put a custom 'shell' on the turtle"),
'restore': _("restores most recent blocks from trash"),
'restoreall': _("restore all blocks from trash"),
- 'rfid': _("RFID"),
'rightpos': _("xcor of right of screen"),
'right': _("turns turtle clockwise (angle in degrees)"),
'run-fastoff': _("Run"),
'run-slowoff': _("Step"),
- 'sandwichbottom': _("bottom block in a collapsibe stack: click to collapse"),
+ 'sandwichbottom': _(
+ "bottom block in a collapsibe stack: click to collapse"),
'sandwichcollapsed': _("bottom block in a collapsed stack: click to open"),
'sandwichtop': _("top of a collapsible stack"),
'sandwichtop_no_label': _("top of a collapsed stack"),
@@ -897,7 +896,8 @@ HELP_STRINGS = {
'sensor': _("Palette of sensor blocks"),
'setcolor': _("sets color of the line drawn by the turtle"),
'setgray': _("sets gray level of the line drawn by the turtle"),
- 'seth': _("sets the heading of the turtle (0 is towards the top of the screen.)"),
+ 'seth': _(
+ "sets the heading of the turtle (0 is towards the top of the screen.)"),
'setpensize': _("sets size of the line drawn by the turtle"),
'setscale': _("sets the scale of media"),
'setshade': _("sets shade of the line drawn by the turtle"),
@@ -909,7 +909,6 @@ HELP_STRINGS = {
'show': _("draws text or show media from the Journal"),
'showblocks': _("restores hidden blocks"),
'skin': _("put a custom 'shell' on the turtle"),
- 'sound': _("raw microphone input signal"),
'sqrt': _("calculates square root"),
'stack1': _("invokes Action 1 stack"),
'stack2': _("invokes Action 2 stack"),
@@ -923,24 +922,31 @@ HELP_STRINGS = {
'storeinbox2': _("stores numeric value in Variable 2"),
'storein': _("stores numeric value in named variable"),
'string': _("string value"),
- 'template1x1': _("presentation template: select Journal object (with description)"),
- 'template1x1a': _("presentation template: select Journal object (no description)"),
+ 'template1x1': _(
+ "presentation template: select Journal object (with description)"),
+ 'template1x1a': _(
+ "presentation template: select Journal object (no description)"),
'template1x2': _("presentation template: select two Journal objects"),
'template2x1': _("presentation template: select two Journal objects"),
'template2x2': _("presentation template: select four Journal objects"),
'templatelist': _("presentation template: list of bullets"),
- 'textcolor': _("holds current text color (can be used in place of a number block)"),
- 'textsize': _("holds current text size (can be used in place of a number block)"),
+ 'textcolor': _(
+ "holds current text color (can be used in place of a number block)"),
+ 'textsize': _(
+ "holds current text size (can be used in place of a number block)"),
+ 'time': _("elapsed time (in seconds) since program started"),
'toppos': _("ycor of top of screen"),
'trash': _("Trashcan"),
'turtle': _("Palette of turtle commands"),
'until': _("do-until-True operator that uses boolean operators from Numbers palette"),
- 'userdefined': _("runs code found in the tamyblock.py module found in the Journal"),
- 'userdefined2args': _("runs code found in the tamyblock.py module found in the Journal"),
- 'userdefined3args': _("runs code found in the tamyblock.py module found in the Journal"),
+ 'userdefined': _(
+ "runs code found in the tamyblock.py module found in the Journal"),
+ 'userdefined2args': _(
+ "runs code found in the tamyblock.py module found in the Journal"),
+ 'userdefined3args': _(
+ "runs code found in the tamyblock.py module found in the Journal"),
'video': _("Sugar Journal video object"),
'voltage': _("sensor voltage"),
- 'volume': _("microphone input volume"),
'vspace': _("jogs stack down"),
'wait': _("pauses program execution a specified number of seconds"),
'while': _("do-while-True operator that uses boolean operators from Numbers palette"),
@@ -953,17 +959,17 @@ HELP_STRINGS = {
#
DEAD_KEYS = ['grave', 'acute', 'circumflex', 'tilde', 'diaeresis', 'abovering']
-DEAD_DICTS = [{'A':192, 'E':200, 'I':204, 'O':210, 'U':217, 'a':224, 'e':232,
- 'i':236, 'o':242, 'u':249},
- {'A':193, 'E':201, 'I':205, 'O':211, 'U':218, 'a':225, 'e':233,
- 'i':237, 'o':243, 'u':250},
- {'A':194, 'E':202, 'I':206, 'O':212, 'U':219, 'a':226, 'e':234,
- 'i':238, 'o':244, 'u':251},
- {'A':195, 'O':211, 'N':209, 'U':360, 'a':227, 'o':245, 'n':241,
- 'u':361},
- {'A':196, 'E':203, 'I':207, 'O':211, 'U':218, 'a':228, 'e':235,
- 'i':239, 'o':245, 'u':252},
- {'A':197, 'a':229}]
+DEAD_DICTS = [{'A': 192, 'E': 200, 'I': 204, 'O': 210, 'U': 217, 'a': 224,
+ 'e': 232, 'i': 236, 'o': 242, 'u': 249},
+ {'A': 193, 'E': 201, 'I': 205, 'O': 211, 'U': 218, 'a': 225,
+ 'e': 233, 'i': 237, 'o': 243, 'u': 250},
+ {'A': 194, 'E': 202, 'I': 206, 'O': 212, 'U': 219, 'a': 226,
+ 'e': 234, 'i': 238, 'o': 244, 'u': 251},
+ {'A': 195, 'O': 211, 'N': 209, 'U': 360, 'a': 227, 'o': 245,
+ 'n': 241, 'u': 361},
+ {'A': 196, 'E': 203, 'I': 207, 'O': 211, 'U': 218, 'a': 228,
+ 'e': 235, 'i': 239, 'o': 245, 'u': 252},
+ {'A': 197, 'a': 229}]
NOISE_KEYS = ['Shift_L', 'Shift_R', 'Control_L', 'Caps_Lock', 'Pause',
'Alt_L', 'Alt_R', 'KP_Enter', 'ISO_Level3_Shift', 'KP_Divide',
'Escape', 'Return', 'KP_Page_Up', 'Up', 'Down', 'Menu',
diff --git a/TurtleArt/taexporthtml.py b/TurtleArt/taexporthtml.py
index 09042f8..2dac7e6 100644
--- a/TurtleArt/taexporthtml.py
+++ b/TurtleArt/taexporthtml.py
@@ -22,7 +22,7 @@ import pygtk
pygtk.require('2.0')
import gtk
import os.path
-from tautils import data_to_string, save_picture, image_to_base64
+from tautils import data_to_string, save_picture, image_to_base64, get_path
from gettext import gettext as _
from cgi import escape
@@ -90,7 +90,8 @@ def save_html(self, tw, embed_flag=True):
tmp = imgdata
else:
pixbuf = gtk.gdk.pixbuf_new_from_file(p)
- imgdata = image_to_base64(pixbuf, tw.activity)
+ imgdata = image_to_base64(pixbuf,
+ get_path(tw.activity, 'instance'))
tmp = self.html_glue['img2'][0]
tmp += imgdata
tmp += self.html_glue['img2'][1]
@@ -110,7 +111,7 @@ def save_html(self, tw, embed_flag=True):
else:
if self.embed_images == True:
imgdata = image_to_base64(save_picture(self.tw.canvas),
- tw.activity)
+ get_path(tw.activity, 'instance'))
else:
imgdata = os.path.join(self.tw.load_save_folder, 'image')
self.tw.save_as_image(imgdata)
diff --git a/TurtleArt/talogo.py b/TurtleArt/talogo.py
index e3576fa..d3b214c 100644
--- a/TurtleArt/talogo.py
+++ b/TurtleArt/talogo.py
@@ -25,12 +25,8 @@ import gtk
from time import time
from math import sqrt
-from numpy import append
-from numpy.fft import rfft
from random import uniform
from operator import isNumberType
-from fcntl import ioctl
-
import os.path
from UserDict import UserDict
@@ -40,32 +36,26 @@ try:
except ImportError:
pass
-from taconstants import TAB_LAYER, BLACK, WHITE, \
- DEFAULT_SCALE, ICON_SIZE, BLOCK_NAMES, CONSTANTS, SENSOR_DC_NO_BIAS, \
- SENSOR_DC_BIAS, XO1, XO15
-from tagplay import play_audio_from_file, play_movie_from_file, stop_media, \
- media_playing
-from tacamera import Camera
-import v4l2
+from taconstants import TAB_LAYER, BLACK, WHITE, DEFAULT_SCALE, ICON_SIZE, \
+ BLOCK_NAMES, CONSTANTS, PREFIX_DICTIONARY
from tajail import myfunc, myfunc_import
from tautils import get_pixbuf_from_journal, convert, data_from_file, \
text_media_type, round_int, chr_to_ord, strtype, get_path
-from RtfParser import RtfTextOnly
-
-from ringbuffer import RingBuffer1d
+from util.RtfParser import RtfTextOnly
from gettext import gettext as _
VALUE_BLOCKS = ['box1', 'box2', 'color', 'shade', 'gray', 'scale', 'pensize',
- 'heading', 'xcor', 'ycor', 'pop', 'see', 'keyboard',
- 'sound', 'volume', 'pitch', 'resistance', 'voltage',
- 'luminance']
+ 'heading', 'xcor', 'ycor', 'pop', 'time', 'keyboard', 'see']
+MEDIA_BLOCKS_DICTIONARY = {} # new media blocks get added here
+PLUGIN_DICTIONARY = {} # new block primitives get added here
import logging
_logger = logging.getLogger('turtleart-activity')
+<<<<<<< HEAD
def find_device():
""" Search for RFID devices. Return a device instance or None. """
device = None
@@ -83,6 +73,8 @@ def find_device():
return device
+=======
+>>>>>>> 860754f7e871617df9d101a51dc64a69b742a0ba
class noKeyError(UserDict):
__missing__ = lambda x, y: 0
@@ -290,6 +282,7 @@ def _identity(x):
return(x)
+<<<<<<< HEAD
def _avg(array, abs_value=False):
""" Calc. the average value of an array """
if len(array) == 0:
@@ -319,6 +312,8 @@ def stop_logo(tw):
tw.active_turtle.show()
+=======
+>>>>>>> 860754f7e871617df9d101a51dc64a69b742a0ba
def _just_stop():
""" yield False to stop stack """
yield False
@@ -337,6 +332,7 @@ class LogoCode:
self.tw = tw
self.oblist = {}
+ # TODO: remove plugin blocks
DEFPRIM = {
'(': [1, lambda self, x: self._prim_opar(x)],
'and': [2, lambda self, x, y: _and(x, y)],
@@ -386,7 +382,6 @@ class LogoCode:
'leftx': [0, lambda self: CONSTANTS['leftx']],
'lpos': [0, lambda self: CONSTANTS['leftpos']],
'less?': [2, lambda self, x, y: _less(x, y)],
- 'luminance': [0, lambda self: self._read_camera(True)],
'mediawait': [0, self._media_wait, True],
'minus': [2, lambda self, x, y: _minus(x, y)],
'mod': [2, lambda self, x, y: _mod(x, y)],
@@ -404,7 +399,6 @@ class LogoCode:
'pendown': [0, lambda self: self.tw.canvas.setpen(True)],
'pensize': [0, lambda self: self.tw.canvas.pensize],
'penup': [0, lambda self: self.tw.canvas.setpen(False)],
- 'pitch': [0, lambda self: self._get_pitch()],
'plus': [2, lambda self, x, y: _plus(x, y)],
'polar': [0, lambda self: self.tw.set_polar(True)],
'pop': [0, lambda self: self._prim_pop()],
@@ -414,12 +408,9 @@ class LogoCode:
'purple': [0, lambda self: CONSTANTS['purple']],
'push': [1, lambda self, x: self._prim_push(x)],
'random': [2, lambda self, x, y: _random(x, y)],
- 'readcamera': [0, lambda self: self._read_camera()],
'readpixel': [0, lambda self: self._read_pixel()],
'red': [0, lambda self: CONSTANTS['red']],
'repeat': [2, self._prim_repeat, True],
- 'resistance': [0, lambda self: self._get_resistance()],
- 'rfid': [0, lambda self: self.tw.rfid_idn],
'right': [1, lambda self, x: self._prim_right(x)],
'rightx': [0, lambda self: CONSTANTS['rightx']],
'rpos': [0, lambda self: CONSTANTS['rightpos']],
@@ -450,7 +441,6 @@ class LogoCode:
'showaligned': [1, lambda self, x: self._show(x, False)],
'showblocks': [0, lambda self: self.tw.showblocks()],
'skin': [1, lambda self, x: self._reskin(x)],
- 'sound': [0, lambda self: self._get_sound()],
'sqrt': [1, lambda self, x: _sqrt(x)],
'stack1': [0, self._prim_stack1, True],
'stack': [1, self._prim_stack, True],
@@ -470,6 +460,7 @@ class LogoCode:
x, y, z, a, b)],
'textcolor': [0, lambda self: self.tw.canvas.textcolor],
'textsize': [0, lambda self: self.tw.textsize],
+ 'time': [0, lambda self: self._elapsed_time()],
'titlex': [0, lambda self: CONSTANTS['titlex']],
'titley': [0, lambda self: CONSTANTS['titley']],
'topy': [0, lambda self: CONSTANTS['topy']],
@@ -480,8 +471,6 @@ class LogoCode:
'userdefined3': [3, lambda self, x, y,
z: self._prim_myblock([x, y, z])],
'video': [1, lambda self, x: self._play_video(x)],
- 'voltage': [0, lambda self: self._get_voltage()],
- 'volume': [0, lambda self: self._get_volume()],
'vres': [0, lambda self: CONSTANTS['height']],
'wait': [1, self._prim_wait, True],
'white': [0, lambda self: WHITE],
@@ -520,9 +509,9 @@ class LogoCode:
self.trace = 0
self.update_values = False
self.gplay = None
- self.ag = None
self.filepath = None
self.dsobject = None
+ self._start_time = None
# Scale factors for depreciated portfolio blocks
self.title_height = int((self.tw.canvas.height / 20) * self.tw.scale)
@@ -531,6 +520,7 @@ class LogoCode:
self.scale = DEFAULT_SCALE
+<<<<<<< HEAD
self.max_samples = 1500
self.input_step = 1
@@ -550,6 +540,18 @@ class LogoCode:
else:
self.imagepath = '/tmp/turtlepic.png'
self.camera = Camera(self.imagepath)
+=======
+ def stop_logo(self):
+ """ Stop logo is called from the Stop button on the toolbar """
+ self.tw.step_time = 0
+ self.step = _just_stop()
+ for plugin in self.tw._plugins:
+ plugin.stop()
+ if self.tw.gst_available:
+ from tagplay import stop_media
+ stop_media(self)
+ self.tw.active_turtle.show()
+>>>>>>> 860754f7e871617df9d101a51dc64a69b742a0ba
def _def_prim(self, name, args, fcn, rprim=False):
""" Define the primitives associated with the blocks """
@@ -574,7 +576,6 @@ class LogoCode:
self.tw.saving_svg = False
self.find_value_blocks()
- self._update_audio_mode()
if self.trace > 0:
self.update_values = True
else:
@@ -601,6 +602,7 @@ class LogoCode:
code = self._blocks_to_code(blk)
if run_flag:
_logger.debug("running code: %s" % (code))
+ self._start_time = time()
self._setup_cmd(code)
if not self.tw.hide:
self.tw.display_coordinates()
@@ -625,35 +627,22 @@ class LogoCode:
code.append(float(blk.values[0]))
except ValueError:
code.append(float(ord(blk.values[0][0])))
- elif blk.name == 'string' or blk.name == 'title':
+ elif blk.name == 'string' or \
+ blk.name == 'title': # depreciated block
if type(blk.values[0]) == float or type(blk.values[0]) == int:
if int(blk.values[0]) == blk.values[0]:
blk.values[0] = int(blk.values[0])
code.append('#s' + str(blk.values[0]))
else:
code.append('#s' + blk.values[0])
- elif blk.name == 'journal':
- if blk.values[0] is not None:
- code.append('#smedia_' + str(blk.values[0]))
- else:
- code.append('#smedia_None')
- elif blk.name == 'description':
- if blk.values[0] is not None:
- code.append('#sdescr_' + str(blk.values[0]))
- else:
- code.append('#sdescr_None')
- elif blk.name == 'audio':
+ elif blk.name in PREFIX_DICTIONARY:
if blk.values[0] is not None:
- code.append('#saudio_' + str(blk.values[0]))
+ code.append(PREFIX_DICTIONARY[blk.name] + \
+ str(blk.values[0]))
else:
- code.append('#saudio_None')
- elif blk.name == 'video':
- if blk.values[0] is not None:
- code.append('#svideo_' + str(blk.values[0]))
- else:
- code.append('#svideo_None')
- elif blk.name == 'camera':
- code.append('#smedia_CAMERA')
+ code.append(PREFIX_DICTIONARY[blk.name] + 'None')
+ elif blk.name in MEDIA_BLOCKS_DICTIONARY:
+ code.append('#smedia_' + blk.name.upper())
else:
return ['%nothing%']
else:
@@ -900,12 +889,16 @@ class LogoCode:
def prim_clear(self):
""" Clear screen """
- stop_media(self)
+ if self.tw.gst_available:
+ from tagplay import stop_media
+ stop_media(self)
self.tw.canvas.clearscreen()
self.scale = DEFAULT_SCALE
- self.tw.set_polar(False)
- self.tw.set_cartesian(False)
+ # Note: users find this "feature" confusing
+ # self.tw.set_polar(False)
+ # self.tw.set_cartesian(False)
self.hidden_turtle = None
+ self._start_time = time()
for name in VALUE_BLOCKS:
self.update_label_value(name)
@@ -953,27 +946,27 @@ class LogoCode:
y = myfunc(f, x)
if str(y) == 'nan':
_logger.debug("python function returned nan")
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror("#notanumber")
else:
return y
except ZeroDivisionError:
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror("#zerodivide")
except ValueError, e:
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror('#' + str(e))
except SyntaxError, e:
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror('#' + str(e))
except NameError, e:
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror('#' + str(e))
except OverflowError:
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror("#overflowerror")
except TypeError:
- stop_logo(self.tw)
+ self.stop_logo()
raise logoerror("#notanumber")
def _prim_forever(self, blklist):
@@ -1106,7 +1099,8 @@ class LogoCode:
if flag and (self.tw.hide or self.tw.step_time == 0):
return
if type(n) == str or type(n) == unicode:
- if n[0:6] == 'media_' and n[6:] != 'CAMERA':
+ if n[0:6] == 'media_' and \
+ n[6:].lower not in MEDIA_BLOCKS_DICTIONARY:
try:
if self.tw.running_sugar:
try:
@@ -1134,9 +1128,8 @@ class LogoCode:
else:
try:
self.keyboard = {'Escape': 27, 'space': 32, ' ': 32,
- 'Return': 13, \
- 'KP_Up': 2, 'KP_Down': 4, 'KP_Left': 1, \
- 'KP_Right': 3}[self.tw.keypress]
+ 'Return': 13, 'KP_Up': 2, 'KP_Down': 4,
+ 'KP_Left': 1, 'KP_Right': 3}[self.tw.keypress]
except KeyError:
self.keyboard = 0
self.update_label_value('keyboard', self.keyboard)
@@ -1149,19 +1142,6 @@ class LogoCode:
self.value_blocks[name] = self.tw.block_list.get_similar_blocks(
'block', name)
- def _update_audio_mode(self):
- """ If there are sensor blocks, set the appropriate audio mode """
- for name in ['sound', 'volume', 'pitch']:
- if len(self.value_blocks[name]) > 0:
- self.tw.audiograb.set_sensor_type()
- return
- if len(self.value_blocks['resistance']) > 0:
- self.tw.audiograb.set_sensor_type(SENSOR_DC_BIAS)
- return
- elif len(self.value_blocks['voltage']) > 0:
- self.tw.audiograb.set_sensor_type(SENSOR_DC_NO_BIAS)
- return
-
def update_label_value(self, name, value=None):
""" Update the label of value blocks to reflect current value """
if self.tw.hide or not self.tw.interactive_mode or \
@@ -1188,10 +1168,12 @@ class LogoCode:
self.update_label_value(name, value)
def _prim_right(self, value):
+ """ Turtle rotates clockwise """
self.tw.canvas.right(float(value))
self.update_label_value('heading', self.tw.canvas.heading)
def _prim_move(self, cmd, value1, value2=None, pendown=True):
+ """ Turtle moves by method specified in value1 """
if value2 is None:
cmd(value1)
else:
@@ -1204,6 +1186,7 @@ class LogoCode:
self.see()
def _prim_arc(self, cmd, value1, value2):
+ """ Turtle draws an arc of degree, radius """
cmd(float(value1), float(value2))
self.update_label_value('xcor',
self.tw.canvas.xcor / self.tw.coord_scale)
@@ -1336,11 +1319,9 @@ class LogoCode:
elif string[0:6] in ['media_', 'descr_', 'audio_', 'video_']:
self.filepath = None
self.dsobject = None
- if string[6:] == 'CAMERA':
- if self.tw.camera_available:
- self.camera.save_camera_input_to_file()
- self.camera.stop_camera_input()
- self.filepath = self.imagepath
+ print string[6:], MEDIA_BLOCKS_DICTIONARY
+ if string[6:].lower() in MEDIA_BLOCKS_DICTIONARY:
+ MEDIA_BLOCKS_DICTIONARY[string[6:].lower()]()
elif os.path.exists(string[6:]): # is it a path?
self.filepath = string[6:]
elif self.tw.running_sugar: # is it a datastore object?
@@ -1377,8 +1358,7 @@ class LogoCode:
if self.dsobject is not None:
self.dsobject.destroy()
else: # assume it is text to display
- x = self._x()
- y = self._y()
+ x, y = self._x(), self._y()
if center:
y -= self.tw.canvas.textsize
self.tw.canvas.draw_text(string, x, y,
@@ -1387,8 +1367,7 @@ class LogoCode:
self.tw.canvas.width - x)
elif type(string) == float or type(string) == int:
string = round_int(string)
- x = self._x()
- y = self._y()
+ x, y = self._x(), self._y()
if center:
y -= self.tw.canvas.textsize
self.tw.canvas.draw_text(string, x, y,
@@ -1401,23 +1380,23 @@ class LogoCode:
if filepath is not None:
self.filepath = filepath
pixbuf = None
- w = self._w()
- h = self._h()
+ w, h = self._w(), self._h()
if w < 1 or h < 1:
return
- if self.filepath is not None and self.filepath != '':
+ if self.dsobject is not None:
+ try:
+ pixbuf = get_pixbuf_from_journal(self.dsobject, w, h)
+ except:
+ _logger.debug("Couldn't open dsobject %s" % (self.dsobject))
+ if pixbuf is None and \
+ self.filepath is not None and \
+ self.filepath != '':
try:
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(self.filepath,
w, h)
except:
self.tw.showlabel('nojournal', self.filepath)
_logger.debug("Couldn't open filepath %s" % (self.filepath))
- elif self.dsobject is not None:
- try:
- pixbuf = get_pixbuf_from_journal(self.dsobject, w, h)
- except:
- self.tw.showlabel('nojournal', self.dsobject)
- _logger.debug("Couldn't open dsobject %s" % (self.dsobject))
if pixbuf is not None:
if center:
self.tw.canvas.draw_pixbuf(pixbuf, 0, 0,
@@ -1448,8 +1427,12 @@ class LogoCode:
f.close()
except IOError:
self.tw.showlabel('nojournal', self.filepath)
+<<<<<<< HEAD
_logger.debug("Couldn't open filepath %s" % \
(self.filepath))
+=======
+ _logger.debug("Couldn't open %s" % (self.filepath))
+>>>>>>> 860754f7e871617df9d101a51dc64a69b742a0ba
else:
if description is not None:
text = str(description)
@@ -1461,23 +1444,35 @@ class LogoCode:
def _media_wait(self):
""" Wait for media to stop playing """
- while(media_playing(self)):
- yield True
+ if self.tw.gst_available:
+ from tagplay import media_playing
+ while(media_playing(self)):
+ yield True
self._ireturn()
yield True
def _play_sound(self):
""" Sound file from Journal """
- play_audio_from_file(self, self.filepath)
+ if self.tw.gst_available:
+ from tagplay import play_audio_from_file
+ play_audio_from_file(self, self.filepath)
def _play_video(self):
""" Movie file from Journal """
- w = self._w()
- h = self._h()
+ w, h = self._w(), self._h()
if w < 1 or h < 1:
return
- play_movie_from_file(self, self.filepath, self._x(), self._y(),
- self._w(), self._h())
+ if self.tw.gst_available:
+ from tagplay import play_movie_from_file
+ play_movie_from_file(self, self.filepath, self._x(), self._y(),
+ w, h)
+
+ def _elapsed_time(self):
+ """ Number of seconds since program execution has started or
+ clean (prim_clear) block encountered """
+ elapsed_time = int(time() - self._start_time)
+ self.update_label_value('time', elapsed_time)
+ return elapsed_time
def see(self):
""" Read r, g, b from the canvas and return a corresponding palette
@@ -1493,6 +1488,7 @@ class LogoCode:
self.heap.append(b)
self.heap.append(g)
self.heap.append(r)
+<<<<<<< HEAD
def _read_camera(self, luminance_only=False):
""" Read average pixel from camera and push b, g, r to the stack """
@@ -1635,6 +1631,8 @@ class LogoCode:
else:
return 0
+=======
+>>>>>>> 860754f7e871617df9d101a51dc64a69b742a0ba
# Depreciated block methods
def _show_template1x1(self, title, media):
diff --git a/TurtleArt/taturtle.py b/TurtleArt/taturtle.py
index a7a3205..f309eef 100644
--- a/TurtleArt/taturtle.py
+++ b/TurtleArt/taturtle.py
@@ -19,7 +19,7 @@
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
-from taconstants import TURTLE_LAYER
+from taconstants import TURTLE_LAYER, DEFAULT_TURTLE_COLORS
from tasprite_factory import SVG, svg_str_to_pixbuf
from tacanvas import wrap100, color_table
from sprites import Sprite
@@ -149,7 +149,7 @@ class Turtle:
self.shapes = generate_turtle_pixbufs(self.colors)
else:
if turtles is not None:
- self.colors = ['#008000', '#00A000']
+ self.colors = DEFAULT_TURTLE_COLORS
self.shapes = turtles.get_pixbufs()
def set_shapes(self, shapes):
diff --git a/TurtleArt/tautils.py b/TurtleArt/tautils.py
index c66b322..521637e 100644
--- a/TurtleArt/tautils.py
+++ b/TurtleArt/tautils.py
@@ -285,18 +285,30 @@ def get_path(activity, subpath):
"org.laptop.TurtleArtActivity", subpath))
-def image_to_base64(pixbuf, activity):
- """ Convert an image to base64 """
- _file_name = os.path.join(get_path(activity, 'instance'), 'imagetmp.png')
+def image_to_base64(pixbuf, path_name):
+ """ Convert an image to base64-encoded data """
+ file_name = os.path.join(path_name, 'imagetmp.png')
if pixbuf != None:
- pixbuf.save(_file_name, "png")
- _base64 = os.path.join(get_path(activity, 'instance'), 'base64tmp')
- _cmd = "base64 <" + _file_name + " >" + _base64
- subprocess.check_call(_cmd, shell=True)
- _file_handle = open(_base64, 'r')
- _data = _file_handle.read()
- _file_handle.close()
- return _data
+ pixbuf.save(file_name, "png")
+ base64 = os.path.join(path_name, 'base64tmp')
+ cmd = "base64 <" + file_name + " >" + base64
+ subprocess.check_call(cmd, shell=True)
+ file_handle = open(base64, 'r')
+ data = file_handle.read()
+ file_handle.close()
+ return data
+
+
+def base64_to_image(data, path_name):
+ """ Convert base64-encoded data to an image """
+ base64 = os.path.join(path_name, 'base64tmp')
+ file_handle = open(base64, 'w')
+ file_handle.write(data)
+ file_handle.close()
+ file_name = os.path.join(path_name, 'imagetmp.png')
+        cmd = "base64 -d <" + base64 + " >" + file_name
+ subprocess.check_call(cmd, shell=True)
+ return file_name
def movie_media_type(name):
@@ -318,7 +330,7 @@ def image_media_type(name):
def text_media_type(name):
""" Is it text media? """
- return name.lower().endswith(('.txt', '.py', '.lg', '.rtf', '.ta'))
+ return name.lower().endswith(('.txt', '.py', '.lg', '.rtf'))
def round_int(num):
diff --git a/TurtleArt/tawindow.py b/TurtleArt/tawindow.py
index ad830d9..f12a753 100644
--- a/TurtleArt/tawindow.py
+++ b/TurtleArt/tawindow.py
@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
#Copyright (c) 2007, Playful Invention Company
-#Copyright (c) 2008-10, Walter Bender
-#Copyright (c) 2009-10 Raúl Gutiérrez Segalés
-#Copyright (C) 2010 Emiliano Pastorino <epastorino@plan.ceibal.edu.uy>
+#Copyright (c) 2008-11, Walter Bender
+#Copyright (c) 2009-11 Raúl Gutiérrez Segalés
#Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
#Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -27,7 +26,14 @@ import pygtk
pygtk.require('2.0')
import gtk
import gobject
-import gst
+
+try:
+ import gst
+ GST_AVAILABLE = True
+except ImportError:
+ # Turtle Art should not fail if gst is not available
+ GST_AVAILABLE = False
+
import os
import os.path
import dbus
@@ -60,7 +66,7 @@ from taconstants import HORIZONTAL_PALETTE, VERTICAL_PALETTE, BLOCK_SCALE, \
NUMBER_STYLE_PORCH, NUMBER_STYLE_BLOCK, \
NUMBER_STYLE_VAR_ARG, CONSTANTS, XO1, XO15, UNKNOWN, \
BASIC_STYLE_VAR_ARG
-from talogo import LogoCode, stop_logo
+from talogo import LogoCode
from tacanvas import TurtleGraphics
from tablock import Blocks, Block
from taturtle import Turtles, Turtle
@@ -76,18 +82,11 @@ from tautils import magnitude, get_load_name, get_save_name, data_from_file, \
dock_dx_dy, data_to_string, journal_check, chooser, \
get_hardware
from tasprite_factory import SVG, svg_str_to_pixbuf, svg_from_file
-from tagplay import stop_media
from sprites import Sprites, Sprite
-from audiograb import AudioGrab_Unknown, AudioGrab_XO1, AudioGrab_XO15
-from rfidutils import strhex2bin, strbin2dec, find_device
from dbus.mainloop.glib import DBusGMainLoop
-HAL_SERVICE = 'org.freedesktop.Hal'
-HAL_MGR_PATH = '/org/freedesktop/Hal/Manager'
-HAL_MGR_IFACE = 'org.freedesktop.Hal.Manager'
-HAL_DEV_IFACE = 'org.freedesktop.Hal.Device'
-REGEXP_SERUSB = '\/org\/freedesktop\/Hal\/devices\/usb_device['\
- 'a-z,A-Z,0-9,_]*serial_usb_[0-9]'
+if GST_AVAILABLE:
+ from tagplay import stop_media
import logging
_logger = logging.getLogger('turtleart-activity')
@@ -96,13 +95,17 @@ _logger = logging.getLogger('turtleart-activity')
class TurtleArtWindow():
""" TurtleArt Window class abstraction """
timeout_tag = [0]
+ _INSTALL_PATH = '/usr/share/turtleart'
+ _ALTERNATE_INSTALL_PATH = '/usr/local/share/turtleart'
+ _PLUGIN_SUBPATH = 'plugins'
def __init__(self, win, path, parent=None, mycolors=None, mynick=None):
self._loaded_project = ''
self.win = None
self._sharing = False
self.parent = parent
- self.send_event = None # method to send events over the network
+ self.send_event = None # method to send events over the network
+ self.gst_available = GST_AVAILABLE
if type(win) == gtk.DrawingArea:
self.interactive_mode = True
self.window = win
@@ -147,7 +150,11 @@ class TurtleArtWindow():
self.mouse_x = 0
self.mouse_y = 0
- locale.setlocale(locale.LC_NUMERIC, '')
+ # if self.running_sugar:
+ try:
+ locale.setlocale(locale.LC_NUMERIC, '')
+ except locale.Error:
+ _logger.debug('unsupported locale')
self.decimal_point = locale.localeconv()['decimal_point']
if self.decimal_point == '' or self.decimal_point is None:
self.decimal_point = '.'
@@ -256,95 +263,91 @@ class TurtleArtWindow():
self._setup_misc()
self._show_toolbar_palette(0, False)
- # setup sound/sensor grab
- if self.hw in [XO1, XO15]:
- PALETTES[PALETTE_NAMES.index('sensor')].append('resistance')
- PALETTES[PALETTE_NAMES.index('sensor')].append('voltage')
- self.audio_started = False
-
- self.camera_available = False
- v4l2src = gst.element_factory_make('v4l2src')
- if v4l2src.props.device_name is not None:
- PALETTES[PALETTE_NAMES.index('sensor')].append('readcamera')
- PALETTES[PALETTE_NAMES.index('sensor')].append('luminance')
- PALETTES[PALETTE_NAMES.index('sensor')].append('camera')
- self.camera_available = True
+ self._plugins = []
+ self._init_plugins()
self.lc = LogoCode(self)
- self.saved_pictures = []
+ self._setup_plugins()
+ self.saved_pictures = []
self.block_operation = ''
- """
- The following code will initialize a USB RFID reader. Please note that
- in order to make this initialization function work, it is necessary to
- set the permission for the ttyUSB device to 0666. You can do this by
- adding a rule to /etc/udev/rules.d
-
- As root (using sudo or su), copy the following text into a new file in
- /etc/udev/rules.d/94-ttyUSB-rules
-
- KERNEL=="ttyUSB[0-9]",MODE="0666"
-
- You only have to do this once.
- """
-
- self.rfid_connected = False
- self.rfid_device = find_device()
- self.rfid_idn = ''
-
- if self.rfid_device is not None:
- _logger.info("RFID device found")
- self.rfid_connected = self.rfid_device.do_connect()
- if self.rfid_connected:
- self.rfid_device.connect("tag-read", self._tag_read_cb)
- self.rfid_device.connect("disconnected", self._disconnected_cb)
-
- loop = DBusGMainLoop()
- bus = dbus.SystemBus(mainloop=loop)
- hmgr_iface = dbus.Interface(bus.get_object(HAL_SERVICE,
- HAL_MGR_PATH), HAL_MGR_IFACE)
-
- hmgr_iface.connect_to_signal('DeviceAdded', self._device_added_cb)
-
- PALETTES[PALETTE_NAMES.index('sensor')].append('rfid')
-
- def _device_added_cb(self, path):
- """
- Called from hal connection when a new device is plugged.
- """
- if not self.rfid_connected:
- self.rfid_device = find_device()
- _logger.debug("DEVICE_ADDED: %s"%self.rfid_device)
- if self.rfid_device is not None:
- _logger.debug("DEVICE_ADDED: RFID device is not None!")
- self.rfid_connected = self._device.do_connect()
- if self.rfid_connected:
- _logger.debug("DEVICE_ADDED: Connected!")
- self.rfid_device.connect("tag-read", self._tag_read_cb)
- self.rfid_device.connect("disconnected", self._disconnected_cb)
-
- def _disconnected_cb(self, device, text):
- """
- Called when the device is disconnected.
- """
- self.rfid_connected = False
- self.rfid_device = None
-
- def _tag_read_cb(self, device, tagid):
- """
- Callback for "tag-read" signal. Receives the read tag id.
- """
- idbin = strhex2bin(tagid)
- self.rfid_idn = strbin2dec(idbin[26:64])
- while self.rfid_idn.__len__() < 9:
- self.rfid_idn = '0' + self.rfid_idn
- print tagid, idbin, self.rfid_idn
-
- def new_buffer(self, buf):
- """ Append a new buffer to the ringbuffer """
- self.lc.ringbuffer.append(buf)
- return True
+ def _get_plugin_home(self):
+ """ Look in current directory first, then usual places """
+ path = os.path.join(os.getcwd(), self._PLUGIN_SUBPATH)
+ if os.path.exists(path):
+ return path
+ path = os.path.expanduser(os.path.join('~', 'Activities',
+ 'TurtleBlocks.activity',
+ self._PLUGIN_SUBPATH))
+ if os.path.exists(path):
+ return path
+ path = os.path.expanduser(os.path.join('~', 'Activities',
+ 'TurtleArt.activity',
+ self._PLUGIN_SUBPATH))
+ if os.path.exists(path):
+ return path
+ path = os.path.join(self._INSTALL_PATH, self._PLUGIN_SUBPATH)
+ if os.path.exists(path):
+ return path
+ path = os.path.join(self._ALTERNATE_INSTALL_PATH,
+ self._PLUGIN_SUBPATH)
+ if os.path.exists(path):
+ return path
+ return None
+
+ def _get_plugin_candidates(self, path):
+ """ Look for plugin files in plugin directory. """
+ plugin_files = []
+ if path is not None:
+ candidates = os.listdir(path)
+ for c in candidates:
+ if c[-10:] == '_plugin.py' and c[0] != '#' and c[0] != '.':
+ plugin_files.append(c.split('.')[0])
+ return plugin_files
+
+ def _init_plugins(self):
+ """ Try importing plugin files from the plugin directory. """
+ for pluginfile in self._get_plugin_candidates(self._get_plugin_home()):
+ pluginclass = pluginfile.capitalize()
+ f = "def f(self): from plugins.%s import %s; return %s(self)" \
+ % (pluginfile, pluginclass, pluginclass)
+ plugins = {}
+ try:
+ exec f in globals(), plugins
+ self._plugins.append(plugins.values()[0](self))
+ except ImportError:
+ print 'failed to import %s' % (pluginclass)
+
+ def _setup_plugins(self):
+ """ Initial setup -- call just once. """
+ for plugin in self._plugins:
+ plugin.setup()
+
+ def _start_plugins(self):
+ """ Start is called everytime we execute blocks. """
+ for plugin in self._plugins:
+ plugin.start()
+
+ def _stop_plugins(self):
+ """ Stop is called whenever we stop execution. """
+ for plugin in self._plugins:
+ plugin.stop()
+
+ def background_plugins(self):
+ """ Background is called when we are pushed to the background. """
+ for plugin in self._plugins:
+ plugin.goto_background()
+
+ def foreground_plugins(self):
+ """ Foreground is called when we are return from the background. """
+ for plugin in self._plugins:
+ plugin.return_to_foreground()
+
+ def _quit_plugins(self):
+ """ Quit is called upon program exit. """
+ for plugin in self._plugins:
+ plugin.quit()
def _setup_events(self):
""" Register the events we listen to. """
@@ -420,29 +423,13 @@ class TurtleArtWindow():
self.lc.prim_clear()
self.display_coordinates()
- def _start_audiograb(self):
- """ Start grabbing audio if there is an audio block in use """
- if len(self.block_list.get_similar_blocks('block',
- ['volume', 'sound', 'pitch', 'resistance', 'voltage'])) > 0:
- if self.audio_started:
- self.audiograb.resume_grabbing()
- else:
- if self.hw == XO15:
- self.audiograb = AudioGrab_XO15(self.new_buffer, self)
- elif self.hw == XO1:
- self.audiograb = AudioGrab_XO1(self.new_buffer, self)
- else:
- self.audiograb = AudioGrab_Unknown(self.new_buffer, self)
- self.audiograb.start_grabbing()
- self.audio_started = True
-
def run_button(self, time):
""" Run turtle! """
if self.running_sugar:
self.activity.recenter()
if self.interactive_mode:
- self._start_audiograb()
+ self._start_plugins()
# Look for a 'start' block
for blk in self.just_blocks():
@@ -462,9 +449,8 @@ class TurtleArtWindow():
def stop_button(self):
""" Stop button """
- stop_logo(self)
- if self.audio_started:
- self.audiograb.pause_grabbing()
+ self.lc.stop_logo()
+ self._stop_plugins()
def set_userdefined(self, blk=None):
""" Change icon for user-defined blocks after loading Python code. """
@@ -1585,6 +1571,7 @@ class TurtleArtWindow():
blk.spr.labels[0] += CURSOR
elif blk.name in BOX_STYLE_MEDIA and blk.name != 'camera':
+ # TODO: isolate reference to camera
self._import_from_journal(self.selected_blk)
if blk.name == 'journal' and self.running_sugar:
self._load_description_block(blk)
@@ -1628,7 +1615,7 @@ class TurtleArtWindow():
dy = 20
blk.expand_in_y(dy)
else:
- self._start_audiograb()
+ self._start_plugins()
self._run_stack(blk)
return
@@ -1691,10 +1678,10 @@ class TurtleArtWindow():
elif blk.name in PYTHON_SKIN:
self._import_py()
else:
- self._start_audiograb()
+ self._start_plugins()
self._run_stack(blk)
- elif blk.name in ['sandwichtop_no_arm_no_label',
+ elif blk.name in ['sandwichtop_no_arm_no_label',
'sandwichtop_no_arm']:
restore_stack(blk)
@@ -1706,7 +1693,7 @@ class TurtleArtWindow():
collapse_stack(top)
else:
- self._start_audiograb()
+ self._start_plugins()
self._run_stack(blk)
def _expand_boolean(self, blk, blk2, dy):
@@ -1790,7 +1777,6 @@ class TurtleArtWindow():
""" Run a stack of blocks. """
if blk is None:
return
- self.lc.ag = None
top = find_top_block(blk)
self.lc.run_blocks(top, self.just_blocks(), True)
if self.interactive_mode:
@@ -1996,9 +1982,9 @@ class TurtleArtWindow():
if keyname == "p":
self.hideshow_button()
elif keyname == 'q':
- if self.audio_started:
- self.audiograb.stop_grabbing()
- stop_media(self.lc)
+            self._quit_plugins()
+ if self.gst_available:
+ stop_media(self.lc)
exit()
elif keyname == 'g':
self._align_to_grid()
@@ -2356,7 +2342,7 @@ class TurtleArtWindow():
def new_project(self):
""" Start a new project """
- stop_logo(self)
+ self.lc.stop_logo()
self._loaded_project = ""
# Put current project in the trash.
while len(self.just_blocks()) > 0:
@@ -2474,7 +2460,7 @@ class TurtleArtWindow():
if self.running_sugar:
try:
dsobject = datastore.get(value)
- except: # Should be IOError, but dbus error is raised
+ except: # Should be IOError, but dbus error is raised
dsobject = None
_logger.debug("couldn't get dsobject %s" % value)
if dsobject is not None:
@@ -2515,6 +2501,7 @@ class TurtleArtWindow():
else:
self._block_skin('pythonoff', blk)
elif btype in BOX_STYLE_MEDIA and blk.spr is not None:
+ # TODO: isolate reference to camera
if len(blk.values) == 0 or blk.values[0] == 'None' or \
blk.values[0] is None or btype == 'camera':
self._block_skin(btype + 'off', blk)
diff --git a/TurtleArt/v4l2.py b/TurtleArt/v4l2.py
deleted file mode 100644
index 9c052fd..0000000
--- a/TurtleArt/v4l2.py
+++ /dev/null
@@ -1,1914 +0,0 @@
-# Python bindings for the v4l2 userspace api
-
-# Copyright (C) 1999-2009 the contributors
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# Alternatively you can redistribute this file under the terms of the
-# BSD license as stated below:
-
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# 3. The names of its contributors may not be used to endorse or promote
-# products derived from this software without specific prior written
-# permission.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""
-Python bindings for the v4l2 userspace api in Linux 2.6.34
-"""
-
-# see linux/videodev2.h
-
-import ctypes
-
-
-_IOC_NRBITS = 8
-_IOC_TYPEBITS = 8
-_IOC_SIZEBITS = 14
-_IOC_DIRBITS = 2
-
-_IOC_NRSHIFT = 0
-_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
-_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
-_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS
-
-_IOC_NONE = 0
-_IOC_WRITE = 1
-_IOC_READ = 2
-
-
-def _IOC(dir_, type_, nr, size):
- return (
- ctypes.c_int32(dir_ << _IOC_DIRSHIFT).value |
- ctypes.c_int32(ord(type_) << _IOC_TYPESHIFT).value |
- ctypes.c_int32(nr << _IOC_NRSHIFT).value |
- ctypes.c_int32(size << _IOC_SIZESHIFT).value)
-
-
-def _IOC_TYPECHECK(t):
- return ctypes.sizeof(t)
-
-
-def _IO(type_, nr):
- return _IOC(_IOC_NONE, type_, nr, 0)
-
-
-def _IOW(type_, nr, size):
- return _IOC(_IOC_WRITE, type_, nr, _IOC_TYPECHECK(size))
-
-
-def _IOR(type_, nr, size):
- return _IOC(_IOC_READ, type_, nr, _IOC_TYPECHECK(size))
-
-
-def _IOWR(type_, nr, size):
- return _IOC(_IOC_READ | _IOC_WRITE, type_, nr, _IOC_TYPECHECK(size))
-
-
-#
-# type alias
-#
-
-enum = ctypes.c_uint
-c_int = ctypes.c_int
-
-
-#
-# time
-#
-
-class timeval(ctypes.Structure):
- _fields_ = [
- ('secs', ctypes.c_long),
- ('usecs', ctypes.c_long),
- ]
-
-
-#
-# v4l2
-#
-
-
-VIDEO_MAX_FRAME = 32
-
-
-VID_TYPE_CAPTURE = 1
-VID_TYPE_TUNER = 2
-VID_TYPE_TELETEXT = 4
-VID_TYPE_OVERLAY = 8
-VID_TYPE_CHROMAKEY = 16
-VID_TYPE_CLIPPING = 32
-VID_TYPE_FRAMERAM = 64
-VID_TYPE_SCALES = 128
-VID_TYPE_MONOCHROME = 256
-VID_TYPE_SUBCAPTURE = 512
-VID_TYPE_MPEG_DECODER = 1024
-VID_TYPE_MPEG_ENCODER = 2048
-VID_TYPE_MJPEG_DECODER = 4096
-VID_TYPE_MJPEG_ENCODER = 8192
-
-
-def v4l2_fourcc(a, b, c, d):
- return ord(a) | (ord(b) << 8) | (ord(c) << 16) | (ord(d) << 24)
-
-
-v4l2_field = enum
-(
- V4L2_FIELD_ANY,
- V4L2_FIELD_NONE,
- V4L2_FIELD_TOP,
- V4L2_FIELD_BOTTOM,
- V4L2_FIELD_INTERLACED,
- V4L2_FIELD_SEQ_TB,
- V4L2_FIELD_SEQ_BT,
- V4L2_FIELD_ALTERNATE,
- V4L2_FIELD_INTERLACED_TB,
- V4L2_FIELD_INTERLACED_BT,
-) = range(10)
-
-
-def V4L2_FIELD_HAS_TOP(field):
- return (
- field == V4L2_FIELD_TOP or
- field == V4L2_FIELD_INTERLACED or
- field == V4L2_FIELD_INTERLACED_TB or
- field == V4L2_FIELD_INTERLACED_BT or
- field == V4L2_FIELD_SEQ_TB or
- field == V4L2_FIELD_SEQ_BT)
-
-
-def V4L2_FIELD_HAS_BOTTOM(field):
- return (
- field == V4L2_FIELD_BOTTOM or
- field == V4L2_FIELD_INTERLACED or
- field == V4L2_FIELD_INTERLACED_TB or
- field == V4L2_FIELD_INTERLACED_BT or
- field == V4L2_FIELD_SEQ_TB or
- field == V4L2_FIELD_SEQ_BT)
-
-
-def V4L2_FIELD_HAS_BOTH(field):
- return (
- field == V4L2_FIELD_INTERLACED or
- field == V4L2_FIELD_INTERLACED_TB or
- field == V4L2_FIELD_INTERLACED_BT or
- field == V4L2_FIELD_SEQ_TB or
- field == V4L2_FIELD_SEQ_BT)
-
-
-v4l2_buf_type = enum
-(
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_BUF_TYPE_VIDEO_OUTPUT,
- V4L2_BUF_TYPE_VIDEO_OVERLAY,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_BUF_TYPE_VBI_OUTPUT,
- V4L2_BUF_TYPE_SLICED_VBI_CAPTURE,
- V4L2_BUF_TYPE_SLICED_VBI_OUTPUT,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY,
- V4L2_BUF_TYPE_PRIVATE,
-) = range(1, 9) + [0x80]
-
-
-v4l2_ctrl_type = enum
-(
- V4L2_CTRL_TYPE_INTEGER,
- V4L2_CTRL_TYPE_BOOLEAN,
- V4L2_CTRL_TYPE_MENU,
- V4L2_CTRL_TYPE_BUTTON,
- V4L2_CTRL_TYPE_INTEGER64,
- V4L2_CTRL_TYPE_CTRL_CLASS,
- V4L2_CTRL_TYPE_STRING,
-) = range(1, 8)
-
-
-v4l2_tuner_type = enum
-(
- V4L2_TUNER_RADIO,
- V4L2_TUNER_ANALOG_TV,
- V4L2_TUNER_DIGITAL_TV,
-) = range(1, 4)
-
-
-v4l2_memory = enum
-(
- V4L2_MEMORY_MMAP,
- V4L2_MEMORY_USERPTR,
- V4L2_MEMORY_OVERLAY,
-) = range(1, 4)
-
-
-v4l2_colorspace = enum
-(
- V4L2_COLORSPACE_SMPTE170M,
- V4L2_COLORSPACE_SMPTE240M,
- V4L2_COLORSPACE_REC709,
- V4L2_COLORSPACE_BT878,
- V4L2_COLORSPACE_470_SYSTEM_M,
- V4L2_COLORSPACE_470_SYSTEM_BG,
- V4L2_COLORSPACE_JPEG,
- V4L2_COLORSPACE_SRGB,
-) = range(1, 9)
-
-
-v4l2_priority = enum
-(
- V4L2_PRIORITY_UNSET,
- V4L2_PRIORITY_BACKGROUND,
- V4L2_PRIORITY_INTERACTIVE,
- V4L2_PRIORITY_RECORD,
- V4L2_PRIORITY_DEFAULT,
-) = range(0, 4) + [2]
-
-
-class v4l2_rect(ctypes.Structure):
- _fields_ = [
- ('left', ctypes.c_int32),
- ('top', ctypes.c_int32),
- ('width', ctypes.c_int32),
- ('height', ctypes.c_int32),
- ]
-
-
-class v4l2_fract(ctypes.Structure):
- _fields_ = [
- ('numerator', ctypes.c_uint32),
- ('denominator', ctypes.c_uint32),
- ]
-
-
-#
-# Driver capabilities
-#
-
-class v4l2_capability(ctypes.Structure):
- _fields_ = [
- ('driver', ctypes.c_char * 16),
- ('card', ctypes.c_char * 32),
- ('bus_info', ctypes.c_char * 32),
- ('version', ctypes.c_uint32),
- ('capabilities', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-#
-# Values for 'capabilities' field
-#
-
-V4L2_CAP_VIDEO_CAPTURE = 0x00000001
-V4L2_CAP_VIDEO_OUTPUT = 0x00000002
-V4L2_CAP_VIDEO_OVERLAY = 0x00000004
-V4L2_CAP_VBI_CAPTURE = 0x00000010
-V4L2_CAP_VBI_OUTPUT = 0x00000020
-V4L2_CAP_SLICED_VBI_CAPTURE = 0x00000040
-V4L2_CAP_SLICED_VBI_OUTPUT = 0x00000080
-V4L2_CAP_RDS_CAPTURE = 0x00000100
-V4L2_CAP_VIDEO_OUTPUT_OVERLAY = 0x00000200
-V4L2_CAP_HW_FREQ_SEEK = 0x00000400
-V4L2_CAP_RDS_OUTPUT = 0x00000800
-
-V4L2_CAP_TUNER = 0x00010000
-V4L2_CAP_AUDIO = 0x00020000
-V4L2_CAP_RADIO = 0x00040000
-V4L2_CAP_MODULATOR = 0x00080000
-
-V4L2_CAP_READWRITE = 0x01000000
-V4L2_CAP_ASYNCIO = 0x02000000
-V4L2_CAP_STREAMING = 0x04000000
-
-
-#
-# Video image format
-#
-
-class v4l2_pix_format(ctypes.Structure):
- _fields_ = [
- ('width', ctypes.c_uint32),
- ('height', ctypes.c_uint32),
- ('pixelformat', ctypes.c_uint32),
- ('field', v4l2_field),
- ('bytesperline', ctypes.c_uint32),
- ('sizeimage', ctypes.c_uint32),
- ('colorspace', v4l2_colorspace),
- ('priv', ctypes.c_uint32),
- ]
-
-# RGB formats
-V4L2_PIX_FMT_RGB332 = v4l2_fourcc('R', 'G', 'B', '1')
-V4L2_PIX_FMT_RGB444 = v4l2_fourcc('R', '4', '4', '4')
-V4L2_PIX_FMT_RGB555 = v4l2_fourcc('R', 'G', 'B', 'O')
-V4L2_PIX_FMT_RGB565 = v4l2_fourcc('R', 'G', 'B', 'P')
-V4L2_PIX_FMT_RGB555X = v4l2_fourcc('R', 'G', 'B', 'Q')
-V4L2_PIX_FMT_RGB565X = v4l2_fourcc('R', 'G', 'B', 'R')
-V4L2_PIX_FMT_BGR24 = v4l2_fourcc('B', 'G', 'R', '3')
-V4L2_PIX_FMT_RGB24 = v4l2_fourcc('R', 'G', 'B', '3')
-V4L2_PIX_FMT_BGR32 = v4l2_fourcc('B', 'G', 'R', '4')
-V4L2_PIX_FMT_RGB32 = v4l2_fourcc('R', 'G', 'B', '4')
-
-# Grey formats
-V4L2_PIX_FMT_GREY = v4l2_fourcc('G', 'R', 'E', 'Y')
-V4L2_PIX_FMT_Y10 = v4l2_fourcc('Y', '1', '0', ' ')
-V4L2_PIX_FMT_Y16 = v4l2_fourcc('Y', '1', '6', ' ')
-
-# Palette formats
-V4L2_PIX_FMT_PAL8 = v4l2_fourcc('P', 'A', 'L', '8')
-
-# Luminance+Chrominance formats
-V4L2_PIX_FMT_YVU410 = v4l2_fourcc('Y', 'V', 'U', '9')
-V4L2_PIX_FMT_YVU420 = v4l2_fourcc('Y', 'V', '1', '2')
-V4L2_PIX_FMT_YUYV = v4l2_fourcc('Y', 'U', 'Y', 'V')
-V4L2_PIX_FMT_YYUV = v4l2_fourcc('Y', 'Y', 'U', 'V')
-V4L2_PIX_FMT_YVYU = v4l2_fourcc('Y', 'V', 'Y', 'U')
-V4L2_PIX_FMT_UYVY = v4l2_fourcc('U', 'Y', 'V', 'Y')
-V4L2_PIX_FMT_VYUY = v4l2_fourcc('V', 'Y', 'U', 'Y')
-V4L2_PIX_FMT_YUV422P = v4l2_fourcc('4', '2', '2', 'P')
-V4L2_PIX_FMT_YUV411P = v4l2_fourcc('4', '1', '1', 'P')
-V4L2_PIX_FMT_Y41P = v4l2_fourcc('Y', '4', '1', 'P')
-V4L2_PIX_FMT_YUV444 = v4l2_fourcc('Y', '4', '4', '4')
-V4L2_PIX_FMT_YUV555 = v4l2_fourcc('Y', 'U', 'V', 'O')
-V4L2_PIX_FMT_YUV565 = v4l2_fourcc('Y', 'U', 'V', 'P')
-V4L2_PIX_FMT_YUV32 = v4l2_fourcc('Y', 'U', 'V', '4')
-V4L2_PIX_FMT_YUV410 = v4l2_fourcc('Y', 'U', 'V', '9')
-V4L2_PIX_FMT_YUV420 = v4l2_fourcc('Y', 'U', '1', '2')
-V4L2_PIX_FMT_HI240 = v4l2_fourcc('H', 'I', '2', '4')
-V4L2_PIX_FMT_HM12 = v4l2_fourcc('H', 'M', '1', '2')
-
-# two planes -- one Y, one Cr + Cb interleaved
-V4L2_PIX_FMT_NV12 = v4l2_fourcc('N', 'V', '1', '2')
-V4L2_PIX_FMT_NV21 = v4l2_fourcc('N', 'V', '2', '1')
-V4L2_PIX_FMT_NV16 = v4l2_fourcc('N', 'V', '1', '6')
-V4L2_PIX_FMT_NV61 = v4l2_fourcc('N', 'V', '6', '1')
-
-# Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm
-V4L2_PIX_FMT_SBGGR8 = v4l2_fourcc('B', 'A', '8', '1')
-V4L2_PIX_FMT_SGBRG8 = v4l2_fourcc('G', 'B', 'R', 'G')
-V4L2_PIX_FMT_SGRBG8 = v4l2_fourcc('G', 'R', 'B', 'G')
-V4L2_PIX_FMT_SRGGB8 = v4l2_fourcc('R', 'G', 'G', 'B')
-V4L2_PIX_FMT_SBGGR10 = v4l2_fourcc('B', 'G', '1', '0')
-V4L2_PIX_FMT_SGBRG10 = v4l2_fourcc('G', 'B', '1', '0')
-V4L2_PIX_FMT_SGRBG10 = v4l2_fourcc('B', 'A', '1', '0')
-V4L2_PIX_FMT_SRGGB10 = v4l2_fourcc('R', 'G', '1', '0')
-V4L2_PIX_FMT_SGRBG10DPCM8 = v4l2_fourcc('B', 'D', '1', '0')
-V4L2_PIX_FMT_SBGGR16 = v4l2_fourcc('B', 'Y', 'R', '2')
-
-# compressed formats
-V4L2_PIX_FMT_MJPEG = v4l2_fourcc('M', 'J', 'P', 'G')
-V4L2_PIX_FMT_JPEG = v4l2_fourcc('J', 'P', 'E', 'G')
-V4L2_PIX_FMT_DV = v4l2_fourcc('d', 'v', 's', 'd')
-V4L2_PIX_FMT_MPEG = v4l2_fourcc('M', 'P', 'E', 'G')
-
-# Vendor-specific formats
-V4L2_PIX_FMT_CPIA1 = v4l2_fourcc('C', 'P', 'I', 'A')
-V4L2_PIX_FMT_WNVA = v4l2_fourcc('W', 'N', 'V', 'A')
-V4L2_PIX_FMT_SN9C10X = v4l2_fourcc('S', '9', '1', '0')
-V4L2_PIX_FMT_SN9C20X_I420 = v4l2_fourcc('S', '9', '2', '0')
-V4L2_PIX_FMT_PWC1 = v4l2_fourcc('P', 'W', 'C', '1')
-V4L2_PIX_FMT_PWC2 = v4l2_fourcc('P', 'W', 'C', '2')
-V4L2_PIX_FMT_ET61X251 = v4l2_fourcc('E', '6', '2', '5')
-V4L2_PIX_FMT_SPCA501 = v4l2_fourcc('S', '5', '0', '1')
-V4L2_PIX_FMT_SPCA505 = v4l2_fourcc('S', '5', '0', '5')
-V4L2_PIX_FMT_SPCA508 = v4l2_fourcc('S', '5', '0', '8')
-V4L2_PIX_FMT_SPCA561 = v4l2_fourcc('S', '5', '6', '1')
-V4L2_PIX_FMT_PAC207 = v4l2_fourcc('P', '2', '0', '7')
-V4L2_PIX_FMT_MR97310A = v4l2_fourcc('M', '3', '1', '0')
-V4L2_PIX_FMT_SN9C2028 = v4l2_fourcc('S', 'O', 'N', 'X')
-V4L2_PIX_FMT_SQ905C = v4l2_fourcc('9', '0', '5', 'C')
-V4L2_PIX_FMT_PJPG = v4l2_fourcc('P', 'J', 'P', 'G')
-V4L2_PIX_FMT_OV511 = v4l2_fourcc('O', '5', '1', '1')
-V4L2_PIX_FMT_OV518 = v4l2_fourcc('O', '5', '1', '8')
-V4L2_PIX_FMT_STV0680 = v4l2_fourcc('S', '6', '8', '0')
-
-
-#
-# Format enumeration
-#
-
-class v4l2_fmtdesc(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('type', ctypes.c_int),
- ('flags', ctypes.c_uint32),
- ('description', ctypes.c_char * 32),
- ('pixelformat', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-V4L2_FMT_FLAG_COMPRESSED = 0x0001
-V4L2_FMT_FLAG_EMULATED = 0x0002
-
-
-#
-# Experimental frame size and frame rate enumeration
-#
-
-v4l2_frmsizetypes = enum
-(
- V4L2_FRMSIZE_TYPE_DISCRETE,
- V4L2_FRMSIZE_TYPE_CONTINUOUS,
- V4L2_FRMSIZE_TYPE_STEPWISE,
-) = range(1, 4)
-
-
-class v4l2_frmsize_discrete(ctypes.Structure):
- _fields_ = [
- ('width', ctypes.c_uint32),
- ('height', ctypes.c_uint32),
- ]
-
-
-class v4l2_frmsize_stepwise(ctypes.Structure):
- _fields_ = [
- ('min_width', ctypes.c_uint32),
- ('min_height', ctypes.c_uint32),
- ('step_width', ctypes.c_uint32),
- ('min_height', ctypes.c_uint32),
- ('max_height', ctypes.c_uint32),
- ('step_height', ctypes.c_uint32),
- ]
-
-
-class v4l2_frmsizeenum(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('discrete', v4l2_frmsize_discrete),
- ('stepwise', v4l2_frmsize_stepwise),
- ]
-
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('pixel_format', ctypes.c_uint32),
- ('type', ctypes.c_uint32),
- ('_u', _u),
- ('reserved', ctypes.c_uint32 * 2)
- ]
-
- _anonymous_ = ('_u',)
-
-
-#
-# Frame rate enumeration
-#
-
-v4l2_frmivaltypes = enum
-(
- V4L2_FRMIVAL_TYPE_DISCRETE,
- V4L2_FRMIVAL_TYPE_CONTINUOUS,
- V4L2_FRMIVAL_TYPE_STEPWISE,
-) = range(1, 4)
-
-
-class v4l2_frmival_stepwise(ctypes.Structure):
- _fields_ = [
- ('min', v4l2_fract),
- ('max', v4l2_fract),
- ('step', v4l2_fract),
- ]
-
-
-class v4l2_frmivalenum(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('discrete', v4l2_fract),
- ('stepwise', v4l2_frmival_stepwise),
- ]
-
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('pixel_format', ctypes.c_uint32),
- ('width', ctypes.c_uint32),
- ('height', ctypes.c_uint32),
- ('type', ctypes.c_uint32),
- ('_u', _u),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
- _anonymous_ = ('_u',)
-
-
-#
-# Timecode
-#
-
-class v4l2_timecode(ctypes.Structure):
- _fields_ = [
- ('type', ctypes.c_uint32),
- ('flags', ctypes.c_uint32),
- ('frames', ctypes.c_uint8),
- ('seconds', ctypes.c_uint8),
- ('minutes', ctypes.c_uint8),
- ('hours', ctypes.c_uint8),
- ('userbits', ctypes.c_uint8 * 4),
- ]
-
-
-V4L2_TC_TYPE_24FPS = 1
-V4L2_TC_TYPE_25FPS = 2
-V4L2_TC_TYPE_30FPS = 3
-V4L2_TC_TYPE_50FPS = 4
-V4L2_TC_TYPE_60FPS = 5
-
-V4L2_TC_FLAG_DROPFRAME = 0x0001
-V4L2_TC_FLAG_COLORFRAME = 0x0002
-V4L2_TC_USERBITS_field = 0x000C
-V4L2_TC_USERBITS_USERDEFINED = 0x0000
-V4L2_TC_USERBITS_8BITCHARS = 0x0008
-
-
-class v4l2_jpegcompression(ctypes.Structure):
- _fields_ = [
- ('quality', ctypes.c_int),
- ('APPn', ctypes.c_int),
- ('APP_len', ctypes.c_int),
- ('APP_data', ctypes.c_char * 60),
- ('COM_len', ctypes.c_int),
- ('COM_data', ctypes.c_char * 60),
- ('jpeg_markers', ctypes.c_uint32),
- ]
-
-
-V4L2_JPEG_MARKER_DHT = 1 << 3
-V4L2_JPEG_MARKER_DQT = 1 << 4
-V4L2_JPEG_MARKER_DRI = 1 << 5
-V4L2_JPEG_MARKER_COM = 1 << 6
-V4L2_JPEG_MARKER_APP = 1 << 7
-
-
-#
-# Memory-mapping buffers
-#
-
-class v4l2_requestbuffers(ctypes.Structure):
- _fields_ = [
- ('count', ctypes.c_uint32),
- ('type', v4l2_buf_type),
- ('memory', v4l2_memory),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-class v4l2_buffer(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('offset', ctypes.c_uint32),
- ('userptr', ctypes.c_ulong),
- ]
-
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('type', v4l2_buf_type),
- ('bytesused', ctypes.c_uint32),
- ('flags', ctypes.c_uint32),
- ('field', v4l2_field),
- ('timestamp', timeval),
- ('timecode', v4l2_timecode),
- ('sequence', ctypes.c_uint32),
- ('memory', v4l2_memory),
- ('m', _u),
- ('length', ctypes.c_uint32),
- ('input', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32),
- ]
-
-
-V4L2_BUF_FLAG_MAPPED = 0x0001
-V4L2_BUF_FLAG_QUEUED = 0x0002
-V4L2_BUF_FLAG_DONE = 0x0004
-V4L2_BUF_FLAG_KEYFRAME = 0x0008
-V4L2_BUF_FLAG_PFRAME = 0x0010
-V4L2_BUF_FLAG_BFRAME = 0x0020
-V4L2_BUF_FLAG_TIMECODE = 0x0100
-V4L2_BUF_FLAG_INPUT = 0x0200
-
-
-#
-# Overlay preview
-#
-
-class v4l2_framebuffer(ctypes.Structure):
- _fields_ = [
- ('capability', ctypes.c_uint32),
- ('flags', ctypes.c_uint32),
- ('base', ctypes.c_void_p),
- ('fmt', v4l2_pix_format),
- ]
-
-V4L2_FBUF_CAP_EXTERNOVERLAY = 0x0001
-V4L2_FBUF_CAP_CHROMAKEY = 0x0002
-V4L2_FBUF_CAP_LIST_CLIPPING = 0x0004
-V4L2_FBUF_CAP_BITMAP_CLIPPING = 0x0008
-V4L2_FBUF_CAP_LOCAL_ALPHA = 0x0010
-V4L2_FBUF_CAP_GLOBAL_ALPHA = 0x0020
-V4L2_FBUF_CAP_LOCAL_INV_ALPHA = 0x0040
-V4L2_FBUF_CAP_SRC_CHROMAKEY = 0x0080
-
-V4L2_FBUF_FLAG_PRIMARY = 0x0001
-V4L2_FBUF_FLAG_OVERLAY = 0x0002
-V4L2_FBUF_FLAG_CHROMAKEY = 0x0004
-V4L2_FBUF_FLAG_LOCAL_ALPHA = 0x0008
-V4L2_FBUF_FLAG_GLOBAL_ALPHA = 0x0010
-V4L2_FBUF_FLAG_LOCAL_INV_ALPHA = 0x0020
-V4L2_FBUF_FLAG_SRC_CHROMAKEY = 0x0040
-
-
-class v4l2_clip(ctypes.Structure):
- pass
-v4l2_clip._fields_ = [
- ('c', v4l2_rect),
- ('next', ctypes.POINTER(v4l2_clip)),
-]
-
-
-class v4l2_window(ctypes.Structure):
- _fields_ = [
- ('w', v4l2_rect),
- ('field', v4l2_field),
- ('chromakey', ctypes.c_uint32),
- ('clips', ctypes.POINTER(v4l2_clip)),
- ('clipcount', ctypes.c_uint32),
- ('bitmap', ctypes.c_void_p),
- ('global_alpha', ctypes.c_uint8),
- ]
-
-
-#
-# Capture parameters
-#
-
-class v4l2_captureparm(ctypes.Structure):
- _fields_ = [
- ('capability', ctypes.c_uint32),
- ('capturemode', ctypes.c_uint32),
- ('timeperframe', v4l2_fract),
- ('extendedmode', ctypes.c_uint32),
- ('readbuffers', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-V4L2_MODE_HIGHQUALITY = 0x0001
-V4L2_CAP_TIMEPERFRAME = 0x1000
-
-
-class v4l2_outputparm(ctypes.Structure):
- _fields_ = [
- ('capability', ctypes.c_uint32),
- ('outputmode', ctypes.c_uint32),
- ('timeperframe', v4l2_fract),
- ('extendedmode', ctypes.c_uint32),
- ('writebuffers', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-#
-# Input image cropping
-#
-
-class v4l2_cropcap(ctypes.Structure):
- _fields_ = [
- ('type', v4l2_buf_type),
- ('bounds', v4l2_rect),
- ('defrect', v4l2_rect),
- ('pixelaspect', v4l2_fract),
- ]
-
-
-class v4l2_crop(ctypes.Structure):
- _fields_ = [
- ('type', ctypes.c_int),
- ('c', v4l2_rect),
- ]
-
-
-#
-# Analog video standard
-#
-
-v4l2_std_id = ctypes.c_uint64
-
-
-V4L2_STD_PAL_B = 0x00000001
-V4L2_STD_PAL_B1 = 0x00000002
-V4L2_STD_PAL_G = 0x00000004
-V4L2_STD_PAL_H = 0x00000008
-V4L2_STD_PAL_I = 0x00000010
-V4L2_STD_PAL_D = 0x00000020
-V4L2_STD_PAL_D1 = 0x00000040
-V4L2_STD_PAL_K = 0x00000080
-
-V4L2_STD_PAL_M = 0x00000100
-V4L2_STD_PAL_N = 0x00000200
-V4L2_STD_PAL_Nc = 0x00000400
-V4L2_STD_PAL_60 = 0x00000800
-
-V4L2_STD_NTSC_M = 0x00001000
-V4L2_STD_NTSC_M_JP = 0x00002000
-V4L2_STD_NTSC_443 = 0x00004000
-V4L2_STD_NTSC_M_KR = 0x00008000
-
-V4L2_STD_SECAM_B = 0x00010000
-V4L2_STD_SECAM_D = 0x00020000
-V4L2_STD_SECAM_G = 0x00040000
-V4L2_STD_SECAM_H = 0x00080000
-V4L2_STD_SECAM_K = 0x00100000
-V4L2_STD_SECAM_K1 = 0x00200000
-V4L2_STD_SECAM_L = 0x00400000
-V4L2_STD_SECAM_LC = 0x00800000
-
-V4L2_STD_ATSC_8_VSB = 0x01000000
-V4L2_STD_ATSC_16_VSB = 0x02000000
-
-
-# some common needed stuff
-V4L2_STD_PAL_BG = (V4L2_STD_PAL_B | V4L2_STD_PAL_B1 | V4L2_STD_PAL_G)
-V4L2_STD_PAL_DK = (V4L2_STD_PAL_D | V4L2_STD_PAL_D1 | V4L2_STD_PAL_K)
-V4L2_STD_PAL = (V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_H | V4L2_STD_PAL_I)
-V4L2_STD_NTSC = (V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_M_KR)
-V4L2_STD_SECAM_DK = (V4L2_STD_SECAM_D | V4L2_STD_SECAM_K | V4L2_STD_SECAM_K1)
-V4L2_STD_SECAM = (V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H | V4L2_STD_SECAM_DK | V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)
-
-V4L2_STD_525_60 = (V4L2_STD_PAL_M | V4L2_STD_PAL_60 | V4L2_STD_NTSC | V4L2_STD_NTSC_443)
-V4L2_STD_625_50 = (V4L2_STD_PAL | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | V4L2_STD_SECAM)
-V4L2_STD_ATSC = (V4L2_STD_ATSC_8_VSB | V4L2_STD_ATSC_16_VSB)
-
-V4L2_STD_UNKNOWN = 0
-V4L2_STD_ALL = (V4L2_STD_525_60 | V4L2_STD_625_50)
-
-# some merged standards
-V4L2_STD_MN = (V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | V4L2_STD_NTSC)
-V4L2_STD_B = (V4L2_STD_PAL_B | V4L2_STD_PAL_B1 | V4L2_STD_SECAM_B)
-V4L2_STD_GH = (V4L2_STD_PAL_G | V4L2_STD_PAL_H|V4L2_STD_SECAM_G | V4L2_STD_SECAM_H)
-V4L2_STD_DK = (V4L2_STD_PAL_DK | V4L2_STD_SECAM_DK)
-
-
-class v4l2_standard(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('id', v4l2_std_id),
- ('name', ctypes.c_char * 24),
- ('frameperiod', v4l2_fract),
- ('framelines', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-#
-# Video timings dv preset
-#
-
-class v4l2_dv_preset(ctypes.Structure):
- _fields_ = [
- ('preset', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4)
- ]
-
-
-#
-# DV preset enumeration
-#
-
-class v4l2_dv_enum_preset(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('preset', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('width', ctypes.c_uint32),
- ('height', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-#
-# DV preset values
-#
-
-V4L2_DV_INVALID = 0
-V4L2_DV_480P59_94 = 1
-V4L2_DV_576P50 = 2
-V4L2_DV_720P24 = 3
-V4L2_DV_720P25 = 4
-V4L2_DV_720P30 = 5
-V4L2_DV_720P50 = 6
-V4L2_DV_720P59_94 = 7
-V4L2_DV_720P60 = 8
-V4L2_DV_1080I29_97 = 9
-V4L2_DV_1080I30 = 10
-V4L2_DV_1080I25 = 11
-V4L2_DV_1080I50 = 12
-V4L2_DV_1080I60 = 13
-V4L2_DV_1080P24 = 14
-V4L2_DV_1080P25 = 15
-V4L2_DV_1080P30 = 16
-V4L2_DV_1080P50 = 17
-V4L2_DV_1080P60 = 18
-
-
-#
-# DV BT timings
-#
-
-class v4l2_bt_timings(ctypes.Structure):
- _fields_ = [
- ('width', ctypes.c_uint32),
- ('height', ctypes.c_uint32),
- ('interlaced', ctypes.c_uint32),
- ('polarities', ctypes.c_uint32),
- ('pixelclock', ctypes.c_uint64),
- ('hfrontporch', ctypes.c_uint32),
- ('hsync', ctypes.c_uint32),
- ('hbackporch', ctypes.c_uint32),
- ('vfrontporch', ctypes.c_uint32),
- ('vsync', ctypes.c_uint32),
- ('vbackporch', ctypes.c_uint32),
- ('il_vfrontporch', ctypes.c_uint32),
- ('il_vsync', ctypes.c_uint32),
- ('il_vbackporch', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 16),
- ]
-
- _pack_ = True
-
-# Interlaced or progressive format
-V4L2_DV_PROGRESSIVE = 0
-V4L2_DV_INTERLACED = 1
-
-# Polarities. If bit is not set, it is assumed to be negative polarity
-V4L2_DV_VSYNC_POS_POL = 0x00000001
-V4L2_DV_HSYNC_POS_POL = 0x00000002
-
-
-class v4l2_dv_timings(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('bt', v4l2_bt_timings),
- ('reserved', ctypes.c_uint32 * 32),
- ]
-
- _fields_ = [
- ('type', ctypes.c_uint32),
- ('_u', _u),
- ]
-
- _anonymous_ = ('_u',)
- _pack_ = True
-
-
-# Values for the type field
-V4L2_DV_BT_656_1120 = 0
-
-
-#
-# Video inputs
-#
-
-class v4l2_input(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('type', ctypes.c_uint32),
- ('audioset', ctypes.c_uint32),
- ('tuner', ctypes.c_uint32),
- ('std', v4l2_std_id),
- ('status', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-V4L2_INPUT_TYPE_TUNER = 1
-V4L2_INPUT_TYPE_CAMERA = 2
-
-V4L2_IN_ST_NO_POWER = 0x00000001
-V4L2_IN_ST_NO_SIGNAL = 0x00000002
-V4L2_IN_ST_NO_COLOR = 0x00000004
-
-V4L2_IN_ST_HFLIP = 0x00000010
-V4L2_IN_ST_VFLIP = 0x00000020
-
-V4L2_IN_ST_NO_H_LOCK = 0x00000100
-V4L2_IN_ST_COLOR_KILL = 0x00000200
-
-V4L2_IN_ST_NO_SYNC = 0x00010000
-V4L2_IN_ST_NO_EQU = 0x00020000
-V4L2_IN_ST_NO_CARRIER = 0x00040000
-
-V4L2_IN_ST_MACROVISION = 0x01000000
-V4L2_IN_ST_NO_ACCESS = 0x02000000
-V4L2_IN_ST_VTR = 0x04000000
-
-V4L2_IN_CAP_PRESETS = 0x00000001
-V4L2_IN_CAP_CUSTOM_TIMINGS = 0x00000002
-V4L2_IN_CAP_STD = 0x00000004
-
-#
-# Video outputs
-#
-
-class v4l2_output(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('type', ctypes.c_uint32),
- ('audioset', ctypes.c_uint32),
- ('modulator', ctypes.c_uint32),
- ('std', v4l2_std_id),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-V4L2_OUTPUT_TYPE_MODULATOR = 1
-V4L2_OUTPUT_TYPE_ANALOG = 2
-V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY = 3
-
-V4L2_OUT_CAP_PRESETS = 0x00000001
-V4L2_OUT_CAP_CUSTOM_TIMINGS = 0x00000002
-V4L2_OUT_CAP_STD = 0x00000004
-
-#
-# Controls
-#
-
-class v4l2_control(ctypes.Structure):
- _fields_ = [
- ('id', ctypes.c_uint32),
- ('value', ctypes.c_int32),
- ]
-
-
-class v4l2_ext_control(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('value', ctypes.c_int32),
- ('value64', ctypes.c_int64),
- ('reserved', ctypes.c_void_p),
- ]
-
- _fields_ = [
- ('id', ctypes.c_uint32),
- ('reserved2', ctypes.c_uint32 * 2),
- ('_u', _u)
- ]
-
- _anonymous_ = ('_u',)
- _pack_ = True
-
-
-class v4l2_ext_controls(ctypes.Structure):
- _fields_ = [
- ('ctrl_class', ctypes.c_uint32),
- ('count', ctypes.c_uint32),
- ('error_idx', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ('controls', ctypes.POINTER(v4l2_ext_control)),
- ]
-
-
-V4L2_CTRL_CLASS_USER = 0x00980000
-V4L2_CTRL_CLASS_MPEG = 0x00990000
-V4L2_CTRL_CLASS_CAMERA = 0x009a0000
-V4L2_CTRL_CLASS_FM_TX = 0x009b0000
-
-
-def V4L2_CTRL_ID_MASK():
- return 0x0fffffff
-
-
-def V4L2_CTRL_ID2CLASS(id_):
- return id_ & 0x0fff0000 # unsigned long
-
-
-def V4L2_CTRL_DRIVER_PRIV(id_):
- return (id_ & 0xffff) >= 0x1000
-
-
-class v4l2_queryctrl(ctypes.Structure):
- _fields_ = [
- ('id', ctypes.c_uint32),
- ('type', v4l2_ctrl_type),
- ('name', ctypes.c_char * 32),
- ('minimum', ctypes.c_int32),
- ('maximum', ctypes.c_int32),
- ('step', ctypes.c_int32),
- ('default', ctypes.c_int32),
- ('flags', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-class v4l2_querymenu(ctypes.Structure):
- _fields_ = [
- ('id', ctypes.c_uint32),
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('reserved', ctypes.c_uint32),
- ]
-
-
-V4L2_CTRL_FLAG_DISABLED = 0x0001
-V4L2_CTRL_FLAG_GRABBED = 0x0002
-V4L2_CTRL_FLAG_READ_ONLY = 0x0004
-V4L2_CTRL_FLAG_UPDATE = 0x0008
-V4L2_CTRL_FLAG_INACTIVE = 0x0010
-V4L2_CTRL_FLAG_SLIDER = 0x0020
-V4L2_CTRL_FLAG_WRITE_ONLY = 0x0040
-
-V4L2_CTRL_FLAG_NEXT_CTRL = 0x80000000
-
-V4L2_CID_BASE = V4L2_CTRL_CLASS_USER | 0x900
-V4L2_CID_USER_BASE = V4L2_CID_BASE
-V4L2_CID_PRIVATE_BASE = 0x08000000
-
-V4L2_CID_USER_CLASS = V4L2_CTRL_CLASS_USER | 1
-V4L2_CID_BRIGHTNESS = V4L2_CID_BASE + 0
-V4L2_CID_CONTRAST = V4L2_CID_BASE + 1
-V4L2_CID_SATURATION = V4L2_CID_BASE + 2
-V4L2_CID_HUE = V4L2_CID_BASE + 3
-V4L2_CID_AUDIO_VOLUME = V4L2_CID_BASE + 5
-V4L2_CID_AUDIO_BALANCE = V4L2_CID_BASE + 6
-V4L2_CID_AUDIO_BASS = V4L2_CID_BASE + 7
-V4L2_CID_AUDIO_TREBLE = V4L2_CID_BASE + 8
-V4L2_CID_AUDIO_MUTE = V4L2_CID_BASE + 9
-V4L2_CID_AUDIO_LOUDNESS = V4L2_CID_BASE + 10
-V4L2_CID_BLACK_LEVEL = V4L2_CID_BASE + 11 # Deprecated
-V4L2_CID_AUTO_WHITE_BALANCE = V4L2_CID_BASE + 12
-V4L2_CID_DO_WHITE_BALANCE = V4L2_CID_BASE + 13
-V4L2_CID_RED_BALANCE = V4L2_CID_BASE + 14
-V4L2_CID_BLUE_BALANCE = V4L2_CID_BASE + 15
-V4L2_CID_GAMMA = V4L2_CID_BASE + 16
-V4L2_CID_WHITENESS = V4L2_CID_GAMMA # Deprecated
-V4L2_CID_EXPOSURE = V4L2_CID_BASE + 17
-V4L2_CID_AUTOGAIN = V4L2_CID_BASE + 18
-V4L2_CID_GAIN = V4L2_CID_BASE + 19
-V4L2_CID_HFLIP = V4L2_CID_BASE + 20
-V4L2_CID_VFLIP = V4L2_CID_BASE + 21
-
-# Deprecated; use V4L2_CID_PAN_RESET and V4L2_CID_TILT_RESET
-V4L2_CID_HCENTER = V4L2_CID_BASE + 22
-V4L2_CID_VCENTER = V4L2_CID_BASE + 23
-
-V4L2_CID_POWER_LINE_FREQUENCY = V4L2_CID_BASE + 24
-
-v4l2_power_line_frequency = enum
-(
- V4L2_CID_POWER_LINE_FREQUENCY_DISABLED,
- V4L2_CID_POWER_LINE_FREQUENCY_50HZ,
- V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
-) = range(3)
-
-V4L2_CID_HUE_AUTO = V4L2_CID_BASE + 25
-V4L2_CID_WHITE_BALANCE_TEMPERATURE = V4L2_CID_BASE + 26
-V4L2_CID_SHARPNESS = V4L2_CID_BASE + 27
-V4L2_CID_BACKLIGHT_COMPENSATION = V4L2_CID_BASE + 28
-V4L2_CID_CHROMA_AGC = V4L2_CID_BASE + 29
-V4L2_CID_COLOR_KILLER = V4L2_CID_BASE + 30
-V4L2_CID_COLORFX = V4L2_CID_BASE + 31
-
-v4l2_colorfx = enum
-(
- V4L2_COLORFX_NONE,
- V4L2_COLORFX_BW,
- V4L2_COLORFX_SEPIA,
-) = range(3)
-
-V4L2_CID_AUTOBRIGHTNESS = V4L2_CID_BASE + 32
-V4L2_CID_BAND_STOP_FILTER = V4L2_CID_BASE + 33
-
-V4L2_CID_ROTATE = V4L2_CID_BASE + 34
-V4L2_CID_BG_COLOR = V4L2_CID_BASE + 35
-V4L2_CID_LASTP1 = V4L2_CID_BASE + 36
-
-V4L2_CID_MPEG_BASE = V4L2_CTRL_CLASS_MPEG | 0x900
-V4L2_CID_MPEG_CLASS = V4L2_CTRL_CLASS_MPEG | 1
-
-# MPEG streams
-V4L2_CID_MPEG_STREAM_TYPE = V4L2_CID_MPEG_BASE + 0
-
-v4l2_mpeg_stream_type = enum
-(
- V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
- V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
- V4L2_MPEG_STREAM_TYPE_MPEG1_SS,
- V4L2_MPEG_STREAM_TYPE_MPEG2_DVD,
- V4L2_MPEG_STREAM_TYPE_MPEG1_VCD,
- V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD,
-) = range(6)
-
-V4L2_CID_MPEG_STREAM_PID_PMT = V4L2_CID_MPEG_BASE + 1
-V4L2_CID_MPEG_STREAM_PID_AUDIO = V4L2_CID_MPEG_BASE + 2
-V4L2_CID_MPEG_STREAM_PID_VIDEO = V4L2_CID_MPEG_BASE + 3
-V4L2_CID_MPEG_STREAM_PID_PCR = V4L2_CID_MPEG_BASE + 4
-V4L2_CID_MPEG_STREAM_PES_ID_AUDIO = V4L2_CID_MPEG_BASE + 5
-V4L2_CID_MPEG_STREAM_PES_ID_VIDEO = V4L2_CID_MPEG_BASE + 6
-V4L2_CID_MPEG_STREAM_VBI_FMT = V4L2_CID_MPEG_BASE + 7
-
-v4l2_mpeg_stream_vbi_fmt = enum
-(
- V4L2_MPEG_STREAM_VBI_FMT_NONE,
- V4L2_MPEG_STREAM_VBI_FMT_IVTV,
-) = range(2)
-
-V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ = V4L2_CID_MPEG_BASE + 100
-
-v4l2_mpeg_audio_sampling_freq = enum
-(
- V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100,
- V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000,
- V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000,
-) = range(3)
-
-V4L2_CID_MPEG_AUDIO_ENCODING = V4L2_CID_MPEG_BASE + 101
-
-v4l2_mpeg_audio_encoding = enum
-(
- V4L2_MPEG_AUDIO_ENCODING_LAYER_1,
- V4L2_MPEG_AUDIO_ENCODING_LAYER_2,
- V4L2_MPEG_AUDIO_ENCODING_LAYER_3,
- V4L2_MPEG_AUDIO_ENCODING_AAC,
- V4L2_MPEG_AUDIO_ENCODING_AC3,
-) = range(5)
-
-V4L2_CID_MPEG_AUDIO_L1_BITRATE = V4L2_CID_MPEG_BASE + 102
-
-v4l2_mpeg_audio_l1_bitrate = enum
-(
- V4L2_MPEG_AUDIO_L1_BITRATE_32K,
- V4L2_MPEG_AUDIO_L1_BITRATE_64K,
- V4L2_MPEG_AUDIO_L1_BITRATE_96K,
- V4L2_MPEG_AUDIO_L1_BITRATE_128K,
- V4L2_MPEG_AUDIO_L1_BITRATE_160K,
- V4L2_MPEG_AUDIO_L1_BITRATE_192K,
- V4L2_MPEG_AUDIO_L1_BITRATE_224K,
- V4L2_MPEG_AUDIO_L1_BITRATE_256K,
- V4L2_MPEG_AUDIO_L1_BITRATE_288K,
- V4L2_MPEG_AUDIO_L1_BITRATE_320K,
- V4L2_MPEG_AUDIO_L1_BITRATE_352K,
- V4L2_MPEG_AUDIO_L1_BITRATE_384K,
- V4L2_MPEG_AUDIO_L1_BITRATE_416K,
- V4L2_MPEG_AUDIO_L1_BITRATE_448K,
-) = range(14)
-
-V4L2_CID_MPEG_AUDIO_L2_BITRATE = V4L2_CID_MPEG_BASE + 103
-
-v4l2_mpeg_audio_l2_bitrate = enum
-(
- V4L2_MPEG_AUDIO_L2_BITRATE_32K,
- V4L2_MPEG_AUDIO_L2_BITRATE_48K,
- V4L2_MPEG_AUDIO_L2_BITRATE_56K,
- V4L2_MPEG_AUDIO_L2_BITRATE_64K,
- V4L2_MPEG_AUDIO_L2_BITRATE_80K,
- V4L2_MPEG_AUDIO_L2_BITRATE_96K,
- V4L2_MPEG_AUDIO_L2_BITRATE_112K,
- V4L2_MPEG_AUDIO_L2_BITRATE_128K,
- V4L2_MPEG_AUDIO_L2_BITRATE_160K,
- V4L2_MPEG_AUDIO_L2_BITRATE_192K,
- V4L2_MPEG_AUDIO_L2_BITRATE_224K,
- V4L2_MPEG_AUDIO_L2_BITRATE_256K,
- V4L2_MPEG_AUDIO_L2_BITRATE_320K,
- V4L2_MPEG_AUDIO_L2_BITRATE_384K,
-) = range(14)
-
-V4L2_CID_MPEG_AUDIO_L3_BITRATE = V4L2_CID_MPEG_BASE + 104
-
-v4l2_mpeg_audio_l3_bitrate = enum
-(
- V4L2_MPEG_AUDIO_L3_BITRATE_32K,
- V4L2_MPEG_AUDIO_L3_BITRATE_40K,
- V4L2_MPEG_AUDIO_L3_BITRATE_48K,
- V4L2_MPEG_AUDIO_L3_BITRATE_56K,
- V4L2_MPEG_AUDIO_L3_BITRATE_64K,
- V4L2_MPEG_AUDIO_L3_BITRATE_80K,
- V4L2_MPEG_AUDIO_L3_BITRATE_96K,
- V4L2_MPEG_AUDIO_L3_BITRATE_112K,
- V4L2_MPEG_AUDIO_L3_BITRATE_128K,
- V4L2_MPEG_AUDIO_L3_BITRATE_160K,
- V4L2_MPEG_AUDIO_L3_BITRATE_192K,
- V4L2_MPEG_AUDIO_L3_BITRATE_224K,
- V4L2_MPEG_AUDIO_L3_BITRATE_256K,
- V4L2_MPEG_AUDIO_L3_BITRATE_320K,
-) = range(14)
-
-V4L2_CID_MPEG_AUDIO_MODE = V4L2_CID_MPEG_BASE + 105
-
-v4l2_mpeg_audio_mode = enum
-(
- V4L2_MPEG_AUDIO_MODE_STEREO,
- V4L2_MPEG_AUDIO_MODE_JOINT_STEREO,
- V4L2_MPEG_AUDIO_MODE_DUAL,
- V4L2_MPEG_AUDIO_MODE_MONO,
-) = range(4)
-
-V4L2_CID_MPEG_AUDIO_MODE_EXTENSION = V4L2_CID_MPEG_BASE + 106
-
-v4l2_mpeg_audio_mode_extension = enum
-(
- V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4,
- V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8,
- V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12,
- V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16,
-) = range(4)
-
-V4L2_CID_MPEG_AUDIO_EMPHASIS = V4L2_CID_MPEG_BASE + 107
-
-v4l2_mpeg_audio_emphasis = enum
-(
- V4L2_MPEG_AUDIO_EMPHASIS_NONE,
- V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS,
- V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17,
-) = range(3)
-
-V4L2_CID_MPEG_AUDIO_CRC = V4L2_CID_MPEG_BASE + 108
-
-v4l2_mpeg_audio_crc = enum
-(
- V4L2_MPEG_AUDIO_CRC_NONE,
- V4L2_MPEG_AUDIO_CRC_CRC16,
-) = range(2)
-
-V4L2_CID_MPEG_AUDIO_MUTE = V4L2_CID_MPEG_BASE + 109
-V4L2_CID_MPEG_AUDIO_AAC_BITRATE = V4L2_CID_MPEG_BASE + 110
-V4L2_CID_MPEG_AUDIO_AC3_BITRATE = V4L2_CID_MPEG_BASE + 111
-
-v4l2_mpeg_audio_ac3_bitrate = enum
-(
- V4L2_MPEG_AUDIO_AC3_BITRATE_32K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_40K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_48K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_56K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_64K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_80K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_96K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_112K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_128K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_160K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_192K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_224K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_256K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_320K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_384K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_448K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_512K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_576K,
- V4L2_MPEG_AUDIO_AC3_BITRATE_640K,
-) = range(19)
-
-V4L2_CID_MPEG_VIDEO_ENCODING = V4L2_CID_MPEG_BASE + 200
-
-v4l2_mpeg_video_encoding = enum
-(
- V4L2_MPEG_VIDEO_ENCODING_MPEG_1,
- V4L2_MPEG_VIDEO_ENCODING_MPEG_2,
- V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC,
-) = range(3)
-
-V4L2_CID_MPEG_VIDEO_ASPECT = V4L2_CID_MPEG_BASE + 201
-
-v4l2_mpeg_video_aspect = enum
-(
- V4L2_MPEG_VIDEO_ASPECT_1x1,
- V4L2_MPEG_VIDEO_ASPECT_4x3,
- V4L2_MPEG_VIDEO_ASPECT_16x9,
- V4L2_MPEG_VIDEO_ASPECT_221x100,
-) = range(4)
-
-V4L2_CID_MPEG_VIDEO_B_FRAMES = V4L2_CID_MPEG_BASE + 202
-V4L2_CID_MPEG_VIDEO_GOP_SIZE = V4L2_CID_MPEG_BASE + 203
-V4L2_CID_MPEG_VIDEO_GOP_CLOSURE = V4L2_CID_MPEG_BASE + 204
-V4L2_CID_MPEG_VIDEO_PULLDOWN = V4L2_CID_MPEG_BASE + 205
-V4L2_CID_MPEG_VIDEO_BITRATE_MODE = V4L2_CID_MPEG_BASE + 206
-
-v4l2_mpeg_video_bitrate_mode = enum
-(
- V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
- V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
-) = range(2)
-
-V4L2_CID_MPEG_VIDEO_BITRATE = V4L2_CID_MPEG_BASE + 207
-V4L2_CID_MPEG_VIDEO_BITRATE_PEAK = V4L2_CID_MPEG_BASE + 208
-V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION = V4L2_CID_MPEG_BASE + 209
-V4L2_CID_MPEG_VIDEO_MUTE = V4L2_CID_MPEG_BASE + 210
-V4L2_CID_MPEG_VIDEO_MUTE_YUV = V4L2_CID_MPEG_BASE + 211
-
-V4L2_CID_MPEG_CX2341X_BASE = V4L2_CTRL_CLASS_MPEG | 0x1000
-V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE = V4L2_CID_MPEG_CX2341X_BASE + 0
-
-v4l2_mpeg_cx2341x_video_spatial_filter_mode = enum
-(
- V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL,
- V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO,
-) = range(2)
-
-V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER = V4L2_CID_MPEG_CX2341X_BASE + 1
-V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE = V4L2_CID_MPEG_CX2341X_BASE + 2
-
-v4l2_mpeg_cx2341x_video_luma_spatial_filter_type = enum
-(
- V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF,
- V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR,
- V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT,
- V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE,
- V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE,
-) = range(5)
-
-V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE = V4L2_CID_MPEG_CX2341X_BASE + 3
-
-v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type = enum
-(
- V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF,
- V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR,
-) = range(2)
-
-V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE = V4L2_CID_MPEG_CX2341X_BASE + 4
-
-v4l2_mpeg_cx2341x_video_temporal_filter_mode = enum
-(
- V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL,
- V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO,
-) = range(2)
-
-V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER = V4L2_CID_MPEG_CX2341X_BASE + 5
-V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE = V4L2_CID_MPEG_CX2341X_BASE + 6
-
-v4l2_mpeg_cx2341x_video_median_filter_type = enum
-(
- V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF,
- V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR,
- V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT,
- V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT,
- V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG,
-) = range(5)
-
-V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM = V4L2_CID_MPEG_CX2341X_BASE + 7
-V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP = V4L2_CID_MPEG_CX2341X_BASE + 8
-V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM = V4L2_CID_MPEG_CX2341X_BASE + 9
-V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP = V4L2_CID_MPEG_CX2341X_BASE + 10
-V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS = V4L2_CID_MPEG_CX2341X_BASE + 11
-
-V4L2_CID_CAMERA_CLASS_BASE = V4L2_CTRL_CLASS_CAMERA | 0x900
-V4L2_CID_CAMERA_CLASS = V4L2_CTRL_CLASS_CAMERA | 1
-
-V4L2_CID_EXPOSURE_AUTO = V4L2_CID_CAMERA_CLASS_BASE + 1
-
-v4l2_exposure_auto_type = enum
-(
- V4L2_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL,
- V4L2_EXPOSURE_SHUTTER_PRIORITY,
- V4L2_EXPOSURE_APERTURE_PRIORITY,
-) = range(4)
-
-V4L2_CID_EXPOSURE_ABSOLUTE = V4L2_CID_CAMERA_CLASS_BASE + 2
-V4L2_CID_EXPOSURE_AUTO_PRIORITY = V4L2_CID_CAMERA_CLASS_BASE + 3
-
-V4L2_CID_PAN_RELATIVE = V4L2_CID_CAMERA_CLASS_BASE + 4
-V4L2_CID_TILT_RELATIVE = V4L2_CID_CAMERA_CLASS_BASE + 5
-V4L2_CID_PAN_RESET = V4L2_CID_CAMERA_CLASS_BASE + 6
-V4L2_CID_TILT_RESET = V4L2_CID_CAMERA_CLASS_BASE + 7
-
-V4L2_CID_PAN_ABSOLUTE = V4L2_CID_CAMERA_CLASS_BASE + 8
-V4L2_CID_TILT_ABSOLUTE = V4L2_CID_CAMERA_CLASS_BASE + 9
-
-V4L2_CID_FOCUS_ABSOLUTE = V4L2_CID_CAMERA_CLASS_BASE + 10
-V4L2_CID_FOCUS_RELATIVE = V4L2_CID_CAMERA_CLASS_BASE + 11
-V4L2_CID_FOCUS_AUTO = V4L2_CID_CAMERA_CLASS_BASE + 12
-
-V4L2_CID_ZOOM_ABSOLUTE = V4L2_CID_CAMERA_CLASS_BASE + 13
-V4L2_CID_ZOOM_RELATIVE = V4L2_CID_CAMERA_CLASS_BASE + 14
-V4L2_CID_ZOOM_CONTINUOUS = V4L2_CID_CAMERA_CLASS_BASE + 15
-
-V4L2_CID_PRIVACY = V4L2_CID_CAMERA_CLASS_BASE + 16
-
-V4L2_CID_FM_TX_CLASS_BASE = V4L2_CTRL_CLASS_FM_TX | 0x900
-V4L2_CID_FM_TX_CLASS = V4L2_CTRL_CLASS_FM_TX | 1
-
-V4L2_CID_RDS_TX_DEVIATION = V4L2_CID_FM_TX_CLASS_BASE + 1
-V4L2_CID_RDS_TX_PI = V4L2_CID_FM_TX_CLASS_BASE + 2
-V4L2_CID_RDS_TX_PTY = V4L2_CID_FM_TX_CLASS_BASE + 3
-V4L2_CID_RDS_TX_PS_NAME = V4L2_CID_FM_TX_CLASS_BASE + 5
-V4L2_CID_RDS_TX_RADIO_TEXT = V4L2_CID_FM_TX_CLASS_BASE + 6
-
-V4L2_CID_AUDIO_LIMITER_ENABLED = V4L2_CID_FM_TX_CLASS_BASE + 64
-V4L2_CID_AUDIO_LIMITER_RELEASE_TIME = V4L2_CID_FM_TX_CLASS_BASE + 65
-V4L2_CID_AUDIO_LIMITER_DEVIATION = V4L2_CID_FM_TX_CLASS_BASE + 66
-
-V4L2_CID_AUDIO_COMPRESSION_ENABLED = V4L2_CID_FM_TX_CLASS_BASE + 80
-V4L2_CID_AUDIO_COMPRESSION_GAIN = V4L2_CID_FM_TX_CLASS_BASE + 81
-V4L2_CID_AUDIO_COMPRESSION_THRESHOLD = V4L2_CID_FM_TX_CLASS_BASE + 82
-V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME = V4L2_CID_FM_TX_CLASS_BASE + 83
-V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME = V4L2_CID_FM_TX_CLASS_BASE + 84
-
-V4L2_CID_PILOT_TONE_ENABLED = V4L2_CID_FM_TX_CLASS_BASE + 96
-V4L2_CID_PILOT_TONE_DEVIATION = V4L2_CID_FM_TX_CLASS_BASE + 97
-V4L2_CID_PILOT_TONE_FREQUENCY = V4L2_CID_FM_TX_CLASS_BASE + 98
-
-V4L2_CID_TUNE_PREEMPHASIS = V4L2_CID_FM_TX_CLASS_BASE + 112
-
-v4l2_preemphasis = enum
-(
- V4L2_PREEMPHASIS_DISABLED,
- V4L2_PREEMPHASIS_50_uS,
- V4L2_PREEMPHASIS_75_uS,
-) = range(3)
-
-V4L2_CID_TUNE_POWER_LEVEL = V4L2_CID_FM_TX_CLASS_BASE + 113
-V4L2_CID_TUNE_ANTENNA_CAPACITOR = V4L2_CID_FM_TX_CLASS_BASE + 114
-
-
-#
-# Tuning
-#
-
-class v4l2_tuner(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('type', v4l2_tuner_type),
- ('capability', ctypes.c_uint32),
- ('rangelow', ctypes.c_uint32),
- ('rangehigh', ctypes.c_uint32),
- ('rxsubchans', ctypes.c_uint32),
- ('audmode', ctypes.c_uint32),
- ('signal', ctypes.c_int32),
- ('afc', ctypes.c_int32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-class v4l2_modulator(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('capability', ctypes.c_uint32),
- ('rangelow', ctypes.c_uint32),
- ('rangehigh', ctypes.c_uint32),
- ('txsubchans', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ]
-
-
-V4L2_TUNER_CAP_LOW = 0x0001
-V4L2_TUNER_CAP_NORM = 0x0002
-V4L2_TUNER_CAP_STEREO = 0x0010
-V4L2_TUNER_CAP_LANG2 = 0x0020
-V4L2_TUNER_CAP_SAP = 0x0020
-V4L2_TUNER_CAP_LANG1 = 0x0040
-V4L2_TUNER_CAP_RDS = 0x0080
-
-V4L2_TUNER_SUB_MONO = 0x0001
-V4L2_TUNER_SUB_STEREO = 0x0002
-V4L2_TUNER_SUB_LANG2 = 0x0004
-V4L2_TUNER_SUB_SAP = 0x0004
-V4L2_TUNER_SUB_LANG1 = 0x0008
-V4L2_TUNER_SUB_RDS = 0x0010
-
-V4L2_TUNER_MODE_MONO = 0x0000
-V4L2_TUNER_MODE_STEREO = 0x0001
-V4L2_TUNER_MODE_LANG2 = 0x0002
-V4L2_TUNER_MODE_SAP = 0x0002
-V4L2_TUNER_MODE_LANG1 = 0x0003
-V4L2_TUNER_MODE_LANG1_LANG2 = 0x0004
-
-
-class v4l2_frequency(ctypes.Structure):
- _fields_ = [
- ('tuner', ctypes.c_uint32),
- ('type', v4l2_tuner_type),
- ('frequency', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 8),
- ]
-
-
-class v4l2_hw_freq_seek(ctypes.Structure):
- _fields_ = [
- ('tuner', ctypes.c_uint32),
- ('type', v4l2_tuner_type),
- ('seek_upward', ctypes.c_uint32),
- ('wrap_around', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 8),
- ]
-
-
-#
-# RDS
-#
-
-class v4l2_rds_data(ctypes.Structure):
- _fields_ = [
- ('lsb', ctypes.c_char),
- ('msb', ctypes.c_char),
- ('block', ctypes.c_char),
- ]
-
- _pack_ = True
-
-
-V4L2_RDS_BLOCK_MSK = 0x7
-V4L2_RDS_BLOCK_A = 0
-V4L2_RDS_BLOCK_B = 1
-V4L2_RDS_BLOCK_C = 2
-V4L2_RDS_BLOCK_D = 3
-V4L2_RDS_BLOCK_C_ALT = 4
-V4L2_RDS_BLOCK_INVALID = 7
-
-V4L2_RDS_BLOCK_CORRECTED = 0x40
-V4L2_RDS_BLOCK_ERROR = 0x80
-
-
-#
-# Audio
-#
-
-class v4l2_audio(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('capability', ctypes.c_uint32),
- ('mode', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-V4L2_AUDCAP_STEREO = 0x00001
-V4L2_AUDCAP_AVL = 0x00002
-
-V4L2_AUDMODE_AVL = 0x00001
-
-
-class v4l2_audioout(ctypes.Structure):
- _fields_ = [
- ('index', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ('capability', ctypes.c_uint32),
- ('mode', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-#
-# Mpeg services (experimental)
-#
-
-V4L2_ENC_IDX_FRAME_I = 0
-V4L2_ENC_IDX_FRAME_P = 1
-V4L2_ENC_IDX_FRAME_B = 2
-V4L2_ENC_IDX_FRAME_MASK = 0xf
-
-
-class v4l2_enc_idx_entry(ctypes.Structure):
- _fields_ = [
- ('offset', ctypes.c_uint64),
- ('pts', ctypes.c_uint64),
- ('length', ctypes.c_uint32),
- ('flags', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-V4L2_ENC_IDX_ENTRIES = 64
-
-
-class v4l2_enc_idx(ctypes.Structure):
- _fields_ = [
- ('entries', ctypes.c_uint32),
- ('entries_cap', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 4),
- ('entry', v4l2_enc_idx_entry * V4L2_ENC_IDX_ENTRIES),
- ]
-
-
-V4L2_ENC_CMD_START = 0
-V4L2_ENC_CMD_STOP = 1
-V4L2_ENC_CMD_PAUSE = 2
-V4L2_ENC_CMD_RESUME = 3
-
-V4L2_ENC_CMD_STOP_AT_GOP_END = 1 << 0
-
-
-class v4l2_encoder_cmd(ctypes.Structure):
- class _u(ctypes.Union):
- class _s(ctypes.Structure):
- _fields_ = [
- ('data', ctypes.c_uint32 * 8),
- ]
-
- _fields_ = [
- ('raw', _s),
- ]
-
- _fields_ = [
- ('cmd', ctypes.c_uint32),
- ('flags', ctypes.c_uint32),
- ('_u', _u),
- ]
-
- _anonymous_ = ('_u',)
-
-
-#
-# Data services (VBI)
-#
-
-class v4l2_vbi_format(ctypes.Structure):
- _fields_ = [
- ('sampling_rate', ctypes.c_uint32),
- ('offset', ctypes.c_uint32),
- ('samples_per_line', ctypes.c_uint32),
- ('sample_format', ctypes.c_uint32),
- ('start', ctypes.c_int32 * 2),
- ('count', ctypes.c_uint32 * 2),
- ('flags', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-V4L2_VBI_UNSYNC = 1 << 0
-V4L2_VBI_INTERLACED = 1 << 1
-
-
-class v4l2_sliced_vbi_format(ctypes.Structure):
- _fields_ = [
- ('service_set', ctypes.c_uint16),
- ('service_lines', ctypes.c_uint16 * 2 * 24),
- ('io_size', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32 * 2),
- ]
-
-
-V4L2_SLICED_TELETEXT_B = 0x0001
-V4L2_SLICED_VPS = 0x0400
-V4L2_SLICED_CAPTION_525 = 0x1000
-V4L2_SLICED_WSS_625 = 0x4000
-V4L2_SLICED_VBI_525 = V4L2_SLICED_CAPTION_525
-V4L2_SLICED_VBI_625 = (
- V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
-
-
-class v4l2_sliced_vbi_cap(ctypes.Structure):
- _fields_ = [
- ('service_set', ctypes.c_uint16),
- ('service_lines', ctypes.c_uint16 * 2 * 24),
- ('type', v4l2_buf_type),
- ('reserved', ctypes.c_uint32 * 3),
- ]
-
-
-class v4l2_sliced_vbi_data(ctypes.Structure):
- _fields_ = [
- ('id', ctypes.c_uint32),
- ('field', ctypes.c_uint32),
- ('line', ctypes.c_uint32),
- ('reserved', ctypes.c_uint32),
- ('data', ctypes.c_char * 48),
- ]
-
-
-#
-# Sliced VBI data inserted into MPEG Streams
-#
-
-
-V4L2_MPEG_VBI_IVTV_TELETEXT_B = 1
-V4L2_MPEG_VBI_IVTV_CAPTION_525 = 4
-V4L2_MPEG_VBI_IVTV_WSS_625 = 5
-V4L2_MPEG_VBI_IVTV_VPS = 7
-
-
-class v4l2_mpeg_vbi_itv0_line(ctypes.Structure):
- _fields_ = [
- ('id', ctypes.c_char),
- ('data', ctypes.c_char * 42),
- ]
-
- _pack_ = True
-
-
-class v4l2_mpeg_vbi_itv0(ctypes.Structure):
- _fields_ = [
- ('linemask', ctypes.c_uint32 * 2), # how to define __le32 in ctypes?
- ('line', v4l2_mpeg_vbi_itv0_line * 35),
- ]
-
- _pack_ = True
-
-
-class v4l2_mpeg_vbi_ITV0(ctypes.Structure):
- _fields_ = [
- ('line', v4l2_mpeg_vbi_itv0_line * 36),
- ]
-
- _pack_ = True
-
-
-V4L2_MPEG_VBI_IVTV_MAGIC0 = "itv0"
-V4L2_MPEG_VBI_IVTV_MAGIC1 = "ITV0"
-
-
-class v4l2_mpeg_vbi_fmt_ivtv(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('itv0', v4l2_mpeg_vbi_itv0),
- ('ITV0', v4l2_mpeg_vbi_ITV0),
- ]
-
- _fields_ = [
- ('magic', ctypes.c_char * 4),
- ('_u', _u)
- ]
-
- _anonymous_ = ('_u',)
- _pack_ = True
-
-
-#
-# Aggregate structures
-#
-
-class v4l2_format(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('pix', v4l2_pix_format),
- ('win', v4l2_window),
- ('vbi', v4l2_vbi_format),
- ('sliced', v4l2_sliced_vbi_format),
- ('raw_data', ctypes.c_char * 200),
- ]
-
- _fields_ = [
- ('type', v4l2_buf_type),
- ('fmt', _u),
- ]
-
-
-class v4l2_streamparm(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('capture', v4l2_captureparm),
- ('output', v4l2_outputparm),
- ('raw_data', ctypes.c_char * 200),
- ]
-
- _fields_ = [
- ('type', v4l2_buf_type),
- ('parm', _u)
- ]
-
-
-#
-# Advanced debugging
-#
-
-V4L2_CHIP_MATCH_HOST = 0
-V4L2_CHIP_MATCH_I2C_DRIVER = 1
-V4L2_CHIP_MATCH_I2C_ADDR = 2
-V4L2_CHIP_MATCH_AC97 = 3
-
-
-class v4l2_dbg_match(ctypes.Structure):
- class _u(ctypes.Union):
- _fields_ = [
- ('addr', ctypes.c_uint32),
- ('name', ctypes.c_char * 32),
- ]
-
- _fields_ = [
- ('type', ctypes.c_uint32),
- ('_u', _u),
- ]
-
- _anonymous_ = ('_u',)
- _pack_ = True
-
-
-class v4l2_dbg_register(ctypes.Structure):
- _fields_ = [
- ('match', v4l2_dbg_match),
- ('size', ctypes.c_uint32),
- ('reg', ctypes.c_uint64),
- ('val', ctypes.c_uint64),
- ]
-
- _pack_ = True
-
-
-class v4l2_dbg_chip_ident(ctypes.Structure):
- _fields_ = [
- ('match', v4l2_dbg_match),
- ('ident', ctypes.c_uint32),
- ('revision', ctypes.c_uint32),
- ]
-
- _pack_ = True
-
-
-#
-# ioctl codes for video devices
-#
-
-VIDIOC_QUERYCAP = _IOR('V', 0, v4l2_capability)
-VIDIOC_RESERVED = _IO('V', 1)
-VIDIOC_ENUM_FMT = _IOWR('V', 2, v4l2_fmtdesc)
-VIDIOC_G_FMT = _IOWR('V', 4, v4l2_format)
-VIDIOC_S_FMT = _IOWR('V', 5, v4l2_format)
-VIDIOC_REQBUFS = _IOWR('V', 8, v4l2_requestbuffers)
-VIDIOC_QUERYBUF = _IOWR('V', 9, v4l2_buffer)
-VIDIOC_G_FBUF = _IOR('V', 10, v4l2_framebuffer)
-VIDIOC_S_FBUF = _IOW('V', 11, v4l2_framebuffer)
-VIDIOC_OVERLAY = _IOW('V', 14, ctypes.c_int)
-VIDIOC_QBUF = _IOWR('V', 15, v4l2_buffer)
-VIDIOC_DQBUF = _IOWR('V', 17, v4l2_buffer)
-VIDIOC_STREAMON = _IOW('V', 18, ctypes.c_int)
-VIDIOC_STREAMOFF = _IOW('V', 19, ctypes.c_int)
-VIDIOC_G_PARM = _IOWR('V', 21, v4l2_streamparm)
-VIDIOC_S_PARM = _IOWR('V', 22, v4l2_streamparm)
-VIDIOC_G_STD = _IOR('V', 23, v4l2_std_id)
-VIDIOC_S_STD = _IOW('V', 24, v4l2_std_id)
-VIDIOC_ENUMSTD = _IOWR('V', 25, v4l2_standard)
-VIDIOC_ENUMINPUT = _IOWR('V', 26, v4l2_input)
-VIDIOC_G_CTRL = _IOWR('V', 27, v4l2_control)
-VIDIOC_S_CTRL = _IOWR('V', 28, v4l2_control)
-VIDIOC_G_TUNER = _IOWR('V', 29, v4l2_tuner)
-VIDIOC_S_TUNER = _IOW('V', 30, v4l2_tuner)
-VIDIOC_G_AUDIO = _IOR('V', 33, v4l2_audio)
-VIDIOC_S_AUDIO = _IOW('V', 34, v4l2_audio)
-VIDIOC_QUERYCTRL = _IOWR('V', 36, v4l2_queryctrl)
-VIDIOC_QUERYMENU = _IOWR('V', 37, v4l2_querymenu)
-VIDIOC_G_INPUT = _IOR('V', 38, ctypes.c_int)
-VIDIOC_S_INPUT = _IOWR('V', 39, ctypes.c_int)
-VIDIOC_G_OUTPUT = _IOR('V', 46, ctypes.c_int)
-VIDIOC_S_OUTPUT = _IOWR('V', 47, ctypes.c_int)
-VIDIOC_ENUMOUTPUT = _IOWR('V', 48, v4l2_output)
-VIDIOC_G_AUDOUT = _IOR('V', 49, v4l2_audioout)
-VIDIOC_S_AUDOUT = _IOW('V', 50, v4l2_audioout)
-VIDIOC_G_MODULATOR = _IOWR('V', 54, v4l2_modulator)
-VIDIOC_S_MODULATOR = _IOW('V', 55, v4l2_modulator)
-VIDIOC_G_FREQUENCY = _IOWR('V', 56, v4l2_frequency)
-VIDIOC_S_FREQUENCY = _IOW('V', 57, v4l2_frequency)
-VIDIOC_CROPCAP = _IOWR('V', 58, v4l2_cropcap)
-VIDIOC_G_CROP = _IOWR('V', 59, v4l2_crop)
-VIDIOC_S_CROP = _IOW('V', 60, v4l2_crop)
-VIDIOC_G_JPEGCOMP = _IOR('V', 61, v4l2_jpegcompression)
-VIDIOC_S_JPEGCOMP = _IOW('V', 62, v4l2_jpegcompression)
-VIDIOC_QUERYSTD = _IOR('V', 63, v4l2_std_id)
-VIDIOC_TRY_FMT = _IOWR('V', 64, v4l2_format)
-VIDIOC_ENUMAUDIO = _IOWR('V', 65, v4l2_audio)
-VIDIOC_ENUMAUDOUT = _IOWR('V', 66, v4l2_audioout)
-VIDIOC_G_PRIORITY = _IOR('V', 67, v4l2_priority)
-VIDIOC_S_PRIORITY = _IOW('V', 68, v4l2_priority)
-VIDIOC_G_SLICED_VBI_CAP = _IOWR('V', 69, v4l2_sliced_vbi_cap)
-VIDIOC_LOG_STATUS = _IO('V', 70)
-VIDIOC_G_EXT_CTRLS = _IOWR('V', 71, v4l2_ext_controls)
-VIDIOC_S_EXT_CTRLS = _IOWR('V', 72, v4l2_ext_controls)
-VIDIOC_TRY_EXT_CTRLS = _IOWR('V', 73, v4l2_ext_controls)
-
-VIDIOC_ENUM_FRAMESIZES = _IOWR('V', 74, v4l2_frmsizeenum)
-VIDIOC_ENUM_FRAMEINTERVALS = _IOWR('V', 75, v4l2_frmivalenum)
-VIDIOC_G_ENC_INDEX = _IOR('V', 76, v4l2_enc_idx)
-VIDIOC_ENCODER_CMD = _IOWR('V', 77, v4l2_encoder_cmd)
-VIDIOC_TRY_ENCODER_CMD = _IOWR('V', 78, v4l2_encoder_cmd)
-
-VIDIOC_DBG_S_REGISTER = _IOW('V', 79, v4l2_dbg_register)
-VIDIOC_DBG_G_REGISTER = _IOWR('V', 80, v4l2_dbg_register)
-
-VIDIOC_DBG_G_CHIP_IDENT = _IOWR('V', 81, v4l2_dbg_chip_ident)
-
-VIDIOC_S_HW_FREQ_SEEK = _IOW('V', 82, v4l2_hw_freq_seek)
-VIDIOC_ENUM_DV_PRESETS = _IOWR('V', 83, v4l2_dv_enum_preset)
-VIDIOC_S_DV_PRESET = _IOWR('V', 84, v4l2_dv_preset)
-VIDIOC_G_DV_PRESET = _IOWR('V', 85, v4l2_dv_preset)
-VIDIOC_QUERY_DV_PRESET = _IOR('V', 86, v4l2_dv_preset)
-VIDIOC_S_DV_TIMINGS = _IOWR('V', 87, v4l2_dv_timings)
-VIDIOC_G_DV_TIMINGS = _IOWR('V', 88, v4l2_dv_timings)
-
-VIDIOC_OVERLAY_OLD = _IOWR('V', 14, ctypes.c_int)
-VIDIOC_S_PARM_OLD = _IOW('V', 22, v4l2_streamparm)
-VIDIOC_S_CTRL_OLD = _IOW('V', 28, v4l2_control)
-VIDIOC_G_AUDIO_OLD = _IOWR('V', 33, v4l2_audio)
-VIDIOC_G_AUDOUT_OLD = _IOWR('V', 49, v4l2_audioout)
-VIDIOC_CROPCAP_OLD = _IOR('V', 58, v4l2_cropcap)
-
-BASE_VIDIOC_PRIVATE = 192