gestures

[unmaintained] PoC for motion gesture detection from webcam input

commit c4e6cb4db310e87434af211100bdc76f818c36a0
parent 3e28b12f104248f5a8b6e83cb0aa3ea6a84b04b5
Author: Stefan Koch <taikano@lavabit.com>
Date:   Sat, 15 Jun 2013 12:35:34 +0200

added support for saving training data

Diffstat:
 M models.py | 18 +++++++++++++-----
 M track.py  | 15 +++++++++++----
2 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/models.py b/models.py
@@ -1,4 +1,6 @@
 import ghmm
+import simplejson
+import os.path
 
 UP = 0
 DOWN = 1
@@ -62,12 +64,18 @@ def initial_vector(gesture):
 models = []
 sigma = ghmm.IntegerRange(0, 8)
 
+i = 0
 for gesture in gestures:
-    # transition matrix
-    A = transition_matrix(gesture[0])
-    B = emission_matrix(gesture[0])
-    pi = initial_vector(gesture[0])
-    # in gesture
+    if not os.path.isfile('/'.join(('models', str(i)))):
+        A = transition_matrix(gesture[0])
+        B = emission_matrix(gesture[0])
+        pi = initial_vector(gesture[0])
+    else:
+        with open('/'.join(('models', str(i)))) as f:
+            (A, B, pi) = simplejson.load(f)
+
     m = ghmm.HMMFromMatrices(sigma, ghmm.DiscreteDistribution(sigma), A, B, pi)
     print(m)
     models.append((m, gesture[1]))
+
+    i += 1
diff --git a/track.py b/track.py
@@ -6,6 +6,7 @@ import subprocess
 import numpy
 import ghmm
 import models
+import simplejson
 
 path = []
 
@@ -55,7 +56,7 @@ def execute(emission_seq, models):
     max_comm = None
     max_val = 0
     for model, command in models:
-        print(model.forward(emission_seq))
+        #print(model.forward(emission_seq))
         res = model.forward(emission_seq)
 
         if res[1][-1] > max_val:
@@ -82,7 +83,8 @@ winName = "Movement Indicator"
 cv2.namedWindow(winName, cv2.CV_WINDOW_AUTOSIZE)
 
 img = cv2.cvtColor(cam.read()[1], cv2.COLOR_BGR2HSV)
-img = cv2.inRange(img, (70, 100, 100), (150, 255, 255))
+cv2.imwrite('test.jpg', img)
+img = cv2.inRange(img, (130, 50, 50), (160, 100, 100))
 img = cv2.erode(img, numpy.array([[1,1,1],[1,1,1],[1,1,1]]))
 img = cv2.dilate(img, numpy.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=3)
 
@@ -101,8 +103,8 @@ while True:
         y0 = y1
 
         img = cv2.cvtColor(cam.read()[1], cv2.COLOR_BGR2HSV)
-        img = cv2.inRange(img, (70, 50, 50), (150, 255, 255))
-        img = cv2.erode(img, numpy.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]), iterations=2)
+        img = cv2.inRange(img, (70, 50, 50), (130, 200, 200))
+        img = cv2.erode(img, numpy.array([[1,1,1,1,1,1,1],[1,1,1,1,1,1,1],[1,1,1,1,1,1,1],[1,1,1,1,1,1,1],[1,1,1,1,1,1,1],[1,1,1,1,1,1,1],[1,1,1,1,1,1,1]]), iterations=2)
         img = cv2.dilate(img, numpy.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=3)
 
         x1, y1 = pointer_pos(img)
@@ -128,6 +130,11 @@ while True:
            train_mode = False
            print("Leaving training mode")
            print(models.models[train_target][0])
+
+           with open("/".join(('models', str(train_target))), 'w') as f:
+               (A, B, pi) = tuple(models.models[train_target][0].asMatrices())
+               simplejson.dump((A, B, pi), f)
+
            path = []
            not_changed = 0
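
The models.py hunk makes model construction lazy: the matrices are rebuilt
from the gesture templates only when no saved file exists under models/<i>;
otherwise the parameters persisted by a previous training run are loaded.
A minimal sketch of that round trip, assuming the transition_matrix,
emission_matrix and initial_vector helpers from models.py; load_or_init and
save_model are hypothetical names (the commit inlines both steps):

    import os.path
    import simplejson

    def load_or_init(i, gesture):
        # Prefer parameters saved by a previous training run.
        path = '/'.join(('models', str(i)))
        if os.path.isfile(path):
            with open(path) as f:
                return tuple(simplejson.load(f))
        # Otherwise fall back to the hand-built matrices.
        return (transition_matrix(gesture),
                emission_matrix(gesture),
                initial_vector(gesture))

    def save_model(i, model):
        # Mirrors the track.py hunk: asMatrices() yields (A, B, pi),
        # which serializes directly as nested JSON lists.
        with open('/'.join(('models', str(i)), ), 'w') as f:
            simplejson.dump(tuple(model.asMatrices()), f)

Plain JSON keeps the saved models human-readable and avoids pickling the
ghmm objects themselves, which do not serialize cleanly.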
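The track.py changes retune the usual OpenCV mask pipeline: threshold the
HSV frame to the tracked colour window, erode to remove speckle, dilate to
reconnect the blob. A sketch with this commit's in-loop bounds; the 7x7
ones-kernel stands in for the literal matrix in the diff, and cam is the
cv2.VideoCapture opened elsewhere in track.py:

    import cv2
    import numpy

    img = cv2.cvtColor(cam.read()[1], cv2.COLOR_BGR2HSV)
    # Keep only pixels inside the hue/saturation/value window.
    img = cv2.inRange(img, (70, 50, 50), (130, 200, 200))
    # Erode with a large kernel to drop isolated noise pixels,
    # then dilate to grow the surviving region back into one blob.
    img = cv2.erode(img, numpy.ones((7, 7), numpy.uint8), iterations=2)
    img = cv2.dilate(img, numpy.ones((3, 3), numpy.uint8), iterations=3)

Lowering the upper bound from (150, 255, 255) to (130, 200, 200) rejects
brighter, more saturated pixels that previously leaked into the mask.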