Hier werden die Unterschiede zwischen zwei Versionen gezeigt.
Beide Seiten der vorigen Revision Vorhergehende Überarbeitung Nächste Überarbeitung | Vorhergehende Überarbeitung | ||
techniken:kinect [2013/10/15 17:05] c.jaedicke [KinectPhysics] |
techniken:kinect [2016/01/21 12:45] (aktuell) |
||
---|---|---|---|
Zeile 1: | Zeile 1: | ||
====== Kinect ====== | ====== Kinect ====== | ||
- | Die Kinect ist eine Hardware zur Steuerung der Xbox360. Mit ihr lassen sich besonders gut Tiefen-Informationen eines Raumes gewinnen und die Silhouette eines Menschen vom Raum trennen. | + | Die Kinect ist eine Hardware zur Steuerung der Xbox360. Mit ihr lassen sich besonders gut Tiefen-Informationen eines Raumes gewinnen und die Bewegungen eines Menschen im Raum messen. |
- | Wir wollen zunächst die Kinect unter Windows für Processing einrichten. Dazu müsst ihr folgende Installationen durchführen: | + | Sie misst den Abstand von Oberflächen zur Kamera an vielen verschiedenen Orten im Sichtfeld und erzeugt so eine Art Tiefen-Video, in dem die Farbe eines Pixels dem Abstand zur Kamera in mm entspricht. Zusätzlich erkennt sie Personen im Blickfeld und gibt eine Liste von Gelenkpositionen "Joint coordinates" in mm aus. |
+ | |||
+ | ===== Installation ===== | ||
+ | ==== Windows ==== | ||
+ | Um die Kinect unter Windows für Processing einzurichten, müsst ihr folgende Installationen durchführen: | ||
- Processing | - Processing | ||
- | - Download [[https://processing.org/download/|Processing 2.0]] oder spätere | + | - Download [[https://processing.org/download/|Processing 2.0]] oder aktueller |
- | - Geh in das Menü: Sketch -> Import Library -> Add Library | + | - Geht in das Menü: Sketch -> Import Library -> Add Library |
- Installiert "SimpleOpenNI" | - Installiert "SimpleOpenNI" | ||
+ | |||
- Kinect SDK | - Kinect SDK | ||
- Download [[http://go.microsoft.com/fwlink/?LinkId=275588|Kinect SDK]] | - Download [[http://go.microsoft.com/fwlink/?LinkId=275588|Kinect SDK]] | ||
Zeile 15: | Zeile 20: | ||
Es kann sein, dass ihr zusätzlich noch die .NET Frameworks installieren müsst, aber darauf macht euch der Kinect-SDK-Installer aufmerksam. | Es kann sein, dass ihr zusätzlich noch die .NET Frameworks installieren müsst, aber darauf macht euch der Kinect-SDK-Installer aufmerksam. | ||
Nach erfolgreicher Installation sollte die Kinect im Gerätemanager auftauchen. | Nach erfolgreicher Installation sollte die Kinect im Gerätemanager auftauchen. | ||
+ | ===== Nützliche Beispiele ===== | ||
+ | Die SimpleOpenNI Library kommt mit einigen Beispielen, die euch eine Idee davon vermitteln, wie ihr Daten aus der Kinect in euer Programm bekommt. | ||
+ | === Skelettkoordinaten === | ||
+ | Wenn ihr hauptsächlich an der Position eines vor der Kinect stehenden Menschen interessiert seid, schaut euch mal das Beispiel **//Contributed Libraries/SimpleOpenNI/User//** an. | ||
+ | |||
+ | In der Funktion drawSkeleton(int userId) gibt es den Abschnitt | ||
+ | <code Java> | ||
+ | // to get the 3d joint data | ||
+ | PVector jointPos = new PVector(); | ||
+ | context.getJointPositionSkeleton(userId,SimpleOpenNI.SKEL_NECK,jointPos); | ||
+ | println(jointPos) | ||
+ | </code> | ||
+ | der die Position des Halses (SKEL_NECK) einer erkannten Person ausgibt. | ||
+ | |||
+ | |||
+ | |||
+ | ---- | ||
+ | ===== Zusätzliches Spektakel ===== | ||
==== Kinect Physics Tutorial for Processing ==== | ==== Kinect Physics Tutorial for Processing ==== | ||
Zeile 92: | Zeile 115: | ||
<code java> | <code java> | ||
- | // Kinect Physics Example by Amnon Owed (15/09/12) | + | // Kinect Physics Example by Amnon Owed (15/09/12) modified by Corvin Jaedicke (15.10.13) |
// import libraries | // import libraries | ||
Zeile 280: | Zeile 303: | ||
return colorPalette[int(random(1, colorPalette.length))]; | return colorPalette[int(random(1, colorPalette.length))]; | ||
} | } | ||
- | <code> | + | </code> |
+ | |||
+ | Die CustomShape Klasse | ||
+ | |||
+ | <code java> | ||
+ | // usually one would probably make a generic Shape class and subclass different types (circle, polygon), but that | ||
+ | // would mean at least 3 instead of 1 class, so for this tutorial it's a combi-class CustomShape for all types of shapes | ||
+ | // to save some space and keep the code as concise as possible I took a few shortcuts to prevent repeating the same code | ||
+ | class CustomShape { | ||
+ | // to hold the box2d body | ||
+ | Body body; | ||
+ | // to hold the Toxiclibs polygon shape | ||
+ | Polygon2D toxiPoly; | ||
+ | // custom color for each shape | ||
+ | color col; | ||
+ | // radius (also used to distinguish between circles and polygons in this combi-class | ||
+ | float r; | ||
+ | |||
+ | CustomShape(float x, float y, float r) { | ||
+ | this.r = r; | ||
+ | // create a body (polygon or circle based on the r) | ||
+ | makeBody(x, y); | ||
+ | // get a random color | ||
+ | col = getRandomColor(); | ||
+ | } | ||
+ | |||
+ | void makeBody(float x, float y) { | ||
+ | // define a dynamic body positioned at xy in box2d world coordinates, | ||
+ | // create it and set the initial values for this box2d body's speed and angle | ||
+ | BodyDef bd = new BodyDef(); | ||
+ | bd.type = BodyType.DYNAMIC; | ||
+ | bd.position.set(box2d.coordPixelsToWorld(new Vec2(x, y))); | ||
+ | body = box2d.createBody(bd); | ||
+ | body.setLinearVelocity(new Vec2(random(-8, 8), random(2, 8))); | ||
+ | body.setAngularVelocity(random(-5, 5)); | ||
+ | |||
+ | // depending on the r this combi-code creates either a box2d polygon or a circle | ||
+ | if (r == -1) { | ||
+ | // box2d polygon shape | ||
+ | PolygonShape sd = new PolygonShape(); | ||
+ | // toxiclibs polygon creator (triangle, square, etc) | ||
+ | toxiPoly = new Circle(random(5, 20)).toPolygon2D(int(random(3, 6))); | ||
+ | // place the toxiclibs polygon's vertices into a vec2d array | ||
+ | Vec2[] vertices = new Vec2[toxiPoly.getNumPoints()]; | ||
+ | for (int i=0; i<vertices.length; i++) { | ||
+ | Vec2D v = toxiPoly.vertices.get(i); | ||
+ | vertices[i] = box2d.vectorPixelsToWorld(new Vec2(v.x, v.y)); | ||
+ | } | ||
+ | // put the vertices into the box2d shape | ||
+ | sd.set(vertices, vertices.length); | ||
+ | // create the fixture from the shape (deflect things based on the actual polygon shape) | ||
+ | body.createFixture(sd, 1); | ||
+ | } else { | ||
+ | // box2d circle shape of radius r | ||
+ | CircleShape cs = new CircleShape(); | ||
+ | cs.m_radius = box2d.scalarPixelsToWorld(r); | ||
+ | // tweak the circle's fixture def a little bit | ||
+ | FixtureDef fd = new FixtureDef(); | ||
+ | fd.shape = cs; | ||
+ | fd.density = 1; | ||
+ | fd.friction = 0.01; | ||
+ | fd.restitution = 0.3; | ||
+ | // create the fixture from the shape's fixture def (deflect things based on the actual circle shape) | ||
+ | body.createFixture(fd); | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // method to loosely move shapes outside a person's polygon | ||
+ | // (alternatively you could allow or remove shapes inside a person's polygon) | ||
+ | void update() { | ||
+ | // get the screen position from this shape (circle of polygon) | ||
+ | Vec2 posScreen = box2d.getBodyPixelCoord(body); | ||
+ | // turn it into a toxiclibs Vec2D | ||
+ | Vec2D toxiScreen = new Vec2D(posScreen.x, posScreen.y); | ||
+ | // check if this shape's position is inside the person's polygon | ||
+ | boolean inBody = poly.containsPoint(toxiScreen); | ||
+ | // if a shape is inside the person | ||
+ | if (inBody) { | ||
+ | // find the closest point on the polygon to the current position | ||
+ | Vec2D closestPoint = toxiScreen; | ||
+ | float closestDistance = 9999999; | ||
+ | for (Vec2D v : poly.vertices) { | ||
+ | float distance = v.distanceTo(toxiScreen); | ||
+ | if (distance < closestDistance) { | ||
+ | closestDistance = distance; | ||
+ | closestPoint = v; | ||
+ | } | ||
+ | } | ||
+ | // create a box2d position from the closest point on the polygon | ||
+ | Vec2 contourPos = new Vec2(closestPoint.x, closestPoint.y); | ||
+ | Vec2 posWorld = box2d.coordPixelsToWorld(contourPos); | ||
+ | float angle = body.getAngle(); | ||
+ | // set the box2d body's position of this CustomShape to the new position (use the current angle) | ||
+ | body.setTransform(posWorld, angle); | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // display the customShape | ||
+ | void display() { | ||
+ | // get the pixel coordinates of the body | ||
+ | Vec2 pos = box2d.getBodyPixelCoord(body); | ||
+ | pushMatrix(); | ||
+ | // translate to the position | ||
+ | translate(pos.x, pos.y); | ||
+ | noStroke(); | ||
+ | // use the shape's custom color | ||
+ | fill(col); | ||
+ | // depending on the r this combi-code displays either a polygon or a circle | ||
+ | if (r == -1) { | ||
+ | // rotate by the body's angle | ||
+ | float a = body.getAngle(); | ||
+ | rotate(-a); // minus! | ||
+ | gfx.polygon2D(toxiPoly); | ||
+ | } else { | ||
+ | ellipse(0, 0, r*2, r*2); | ||
+ | } | ||
+ | popMatrix(); | ||
+ | } | ||
+ | |||
+ | // if the shape moves off-screen, destroy the box2d body (important!) | ||
+ | // and return true (which will lead to the removal of this CustomShape object) | ||
+ | boolean done() { | ||
+ | Vec2 posScreen = box2d.getBodyPixelCoord(body); | ||
+ | boolean offscreen = posScreen.y > height; | ||
+ | if (offscreen) { | ||
+ | box2d.destroyBody(body); | ||
+ | return true; | ||
+ | } | ||
+ | return false; | ||
+ | } | ||
+ | } | ||
+ | </code> | ||
+ | |||
+ | Die PolygonBlob Klasse | ||
+ | |||
+ | <code java> | ||
+ | // an extended polygon class quite similar to the earlier PolygonBlob class (but extending Toxiclibs' Polygon2D class instead) | ||
+ | // The main difference is that this one is able to create (and destroy) a box2d body from it's own shape | ||
+ | import java.util.Collections; | ||
+ | class PolygonBlob extends Polygon2D { | ||
+ | // to hold the box2d body | ||
+ | Body body; | ||
+ | |||
+ | // the createPolygon() method is nearly identical to the one presented earlier | ||
+ | // see the Kinect Flow Example for a more detailed description of this method (again, feel free to improve it) | ||
+ | void createPolygon() { | ||
+ | ArrayList<ArrayList<PVector>> contours = new ArrayList<ArrayList<PVector>>(); | ||
+ | int selectedContour = 0; | ||
+ | int selectedPoint = 0; | ||
+ | |||
+ | // create contours from blobs | ||
+ | for (int n=0 ; n<theBlobDetection.getBlobNb(); n++) { | ||
+ | Blob b = theBlobDetection.getBlob(n); | ||
+ | if (b != null && b.getEdgeNb() > 100) { | ||
+ | ArrayList<PVector> contour = new ArrayList<PVector>(); | ||
+ | for (int m=0; m<b.getEdgeNb(); m++) { | ||
+ | EdgeVertex eA = b.getEdgeVertexA(m); | ||
+ | EdgeVertex eB = b.getEdgeVertexB(m); | ||
+ | if (eA != null && eB != null) { | ||
+ | EdgeVertex fn = b.getEdgeVertexA((m+1) % b.getEdgeNb()); | ||
+ | EdgeVertex fp = b.getEdgeVertexA((max(0, m-1))); | ||
+ | float dn = dist(eA.x*kinectWidth, eA.y*kinectHeight, fn.x*kinectWidth, fn.y*kinectHeight); | ||
+ | float dp = dist(eA.x*kinectWidth, eA.y*kinectHeight, fp.x*kinectWidth, fp.y*kinectHeight); | ||
+ | if (dn > 15 || dp > 15) { | ||
+ | if (contour.size() > 0) { | ||
+ | contour.add(new PVector(eB.x*kinectWidth, eB.y*kinectHeight)); | ||
+ | contours.add(contour); | ||
+ | contour = new ArrayList<PVector>(); | ||
+ | } else { | ||
+ | contour.add(new PVector(eA.x*kinectWidth, eA.y*kinectHeight)); | ||
+ | } | ||
+ | } else { | ||
+ | contour.add(new PVector(eA.x*kinectWidth, eA.y*kinectHeight)); | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | while (contours.size() > 0) { | ||
+ | |||
+ | // find next contour | ||
+ | float distance = 999999999; | ||
+ | if (getNumPoints() > 0) { | ||
+ | Vec2D vecLastPoint = vertices.get(getNumPoints()-1); | ||
+ | PVector lastPoint = new PVector(vecLastPoint.x, vecLastPoint.y); | ||
+ | for (int i=0; i<contours.size(); i++) { | ||
+ | ArrayList<PVector> c = contours.get(i); | ||
+ | PVector fp = c.get(0); | ||
+ | PVector lp = c.get(c.size()-1); | ||
+ | if (fp.dist(lastPoint) < distance) { | ||
+ | distance = fp.dist(lastPoint); | ||
+ | selectedContour = i; | ||
+ | selectedPoint = 0; | ||
+ | } | ||
+ | if (lp.dist(lastPoint) < distance) { | ||
+ | distance = lp.dist(lastPoint); | ||
+ | selectedContour = i; | ||
+ | selectedPoint = 1; | ||
+ | } | ||
+ | } | ||
+ | } else { | ||
+ | PVector closestPoint = new PVector(width, height); | ||
+ | for (int i=0; i<contours.size(); i++) { | ||
+ | ArrayList<PVector> c = contours.get(i); | ||
+ | PVector fp = c.get(0); | ||
+ | PVector lp = c.get(c.size()-1); | ||
+ | if (fp.y > kinectHeight-5 && fp.x < closestPoint.x) { | ||
+ | closestPoint = fp; | ||
+ | selectedContour = i; | ||
+ | selectedPoint = 0; | ||
+ | } | ||
+ | if (lp.y > kinectHeight-5 && lp.x < closestPoint.y) { | ||
+ | closestPoint = lp; | ||
+ | selectedContour = i; | ||
+ | selectedPoint = 1; | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // add contour to polygon | ||
+ | ArrayList<PVector> contour = contours.get(selectedContour); | ||
+ | if (selectedPoint > 0) { Collections.reverse(contour); } | ||
+ | for (PVector p : contour) { | ||
+ | add(new Vec2D(p.x, p.y)); | ||
+ | } | ||
+ | contours.remove(selectedContour); | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // creates a shape-deflecting physics chain in the box2d world from this polygon | ||
+ | void createBody() { | ||
+ | // for stability the body is always created (and later destroyed) | ||
+ | BodyDef bd = new BodyDef(); | ||
+ | body = box2d.createBody(bd); | ||
+ | // if there are more than 0 points (aka a person on screen)... | ||
+ | if (getNumPoints() > 0) { | ||
+ | // create a vec2d array of vertices in box2d world coordinates from this polygon | ||
+ | Vec2[] verts = new Vec2[getNumPoints()]; | ||
+ | for (int i=0; i<getNumPoints(); i++) { | ||
+ | Vec2D v = vertices.get(i); | ||
+ | verts[i] = box2d.coordPixelsToWorld(v.x, v.y); | ||
+ | } | ||
+ | // create a chain from the array of vertices | ||
+ | ChainShape chain = new ChainShape(); | ||
+ | chain.createChain(verts, verts.length); | ||
+ | // create fixture in body from the chain (this makes it actually deflect other shapes) | ||
+ | body.createFixture(chain, 1); | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // destroy the box2d body (important!) | ||
+ | void destroyBody() { | ||
+ | box2d.destroyBody(body); | ||
+ | } | ||
+ | } | ||
+ | </code> | ||
+ | |||
+ | ==== KinectFlow ==== | ||
+ | [[http://www.creativeapplications.net/wp-content/uploads/2012/09/AmnonOwed-KinectPhysics-04.png|{{http://www.creativeapplications.net/wp-content/uploads/2012/09/AmnonOwed-KinectPhysics-04.png}}]] | ||
+ | |||
+ | <code java> | ||
+ | // Kinect Flow Example by Amnon Owed (15/09/12) modified by Corvin Jaedicke (15.10.13) | ||
+ | |||
+ | // import libraries | ||
+ | import processing.opengl.*; // opengl | ||
+ | import SimpleOpenNI.*; // kinect | ||
+ | import blobDetection.*; // blobs | ||
+ | |||
+ | // this is a regular java import so we can use and extend the polygon class (see PolygonBlob) | ||
+ | import java.awt.Polygon; | ||
+ | |||
+ | // declare SimpleOpenNI object | ||
+ | SimpleOpenNI context; | ||
+ | // declare BlobDetection object | ||
+ | BlobDetection theBlobDetection; | ||
+ | // declare custom PolygonBlob object (see class for more info) | ||
+ | PolygonBlob poly = new PolygonBlob(); | ||
+ | |||
+ | // PImage to hold incoming imagery and smaller one for blob detection | ||
+ | PImage cam, blobs; | ||
+ | int[] user; | ||
+ | // the kinect's dimensions to be used later on for calculations | ||
+ | int kinectWidth = 640; | ||
+ | int kinectHeight = 480; | ||
+ | // to center and rescale from 640x480 to higher custom resolutions | ||
+ | float reScale; | ||
+ | |||
+ | // background color | ||
+ | color bgColor; | ||
+ | // three color palettes (artifact from me storing many interesting color palettes as strings in an external data file ;-) | ||
+ | String[] palettes = { | ||
+ | "-1117720,-13683658,-8410437,-9998215,-1849945,-5517090,-4250587,-14178341,-5804972,-3498634", | ||
+ | "-67879,-9633503,-8858441,-144382,-4996094,-16604779,-588031", | ||
+ | "-16711663,-13888933,-9029017,-5213092,-1787063,-11375744,-2167516,-15713402,-5389468,-2064585" | ||
+ | }; | ||
+ | |||
+ | // an array called flow of 2250 Particle objects (see Particle class) | ||
+ | Particle[] flow = new Particle[2000]; | ||
+ | // global variables to influence the movement of all particles | ||
+ | float globalX, globalY; | ||
+ | |||
+ | void setup() { | ||
+ | // it's possible to customize this, for example 1920x1080 | ||
+ | size(1280, 720, OPENGL); | ||
+ | // initialize SimpleOpenNI object | ||
+ | context = new SimpleOpenNI(this); | ||
+ | if(context.isInit() == false) { | ||
+ | // if context.enableScene() returns false | ||
+ | // then the Kinect is not working correctly | ||
+ | // make sure the green light is blinking | ||
+ | println("Kinect not connected!"); | ||
+ | exit(); | ||
+ | return; | ||
+ | } else { | ||
+ | // mirror the image to be more intuitive | ||
+ | context.enableDepth(); | ||
+ | context.enableUser(); | ||
+ | context.setMirror(true); | ||
+ | // calculate the reScale value | ||
+ | // currently it's rescaled to fill the complete width (cuts of top-bottom) | ||
+ | // it's also possible to fill the complete height (leaves empty sides) | ||
+ | reScale = (float) width / kinectWidth; | ||
+ | // create a smaller blob image for speed and efficiency | ||
+ | blobs = createImage(kinectWidth/3, kinectHeight/3, RGB); | ||
+ | // initialize blob detection object to the blob image dimensions | ||
+ | theBlobDetection = new BlobDetection(blobs.width, blobs.height); | ||
+ | theBlobDetection.setThreshold(0.2); | ||
+ | setupFlowfield(); | ||
+ | } | ||
+ | } | ||
+ | |||
+ | void draw() { | ||
+ | // fading background | ||
+ | noStroke(); | ||
+ | fill(bgColor, 65); | ||
+ | rect(0, 0, width, height); | ||
+ | // update the SimpleOpenNI object | ||
+ | context.update(); | ||
+ | // put the image into a PImage | ||
+ | |||
+ | user = context.userMap(); | ||
+ | cam = context.userImage(); | ||
+ | |||
+ | // populate the pixels array | ||
+ | // from the sketch's current contents | ||
+ | //loadPixels(); | ||
+ | for (int i = 0; i < cam.pixels.length; i++) { | ||
+ | // if the current pixel is on a user | ||
+ | if (user[i] > 0) { | ||
+ | // make it green | ||
+ | cam.pixels[i] = color(0,0,255); | ||
+ | }else{ | ||
+ | cam.pixels[i] = color(0,0,0); | ||
+ | } | ||
+ | } | ||
+ | // copy the image into the smaller blob imag | ||
+ | blobs.copy(cam, 0, 0, cam.width, cam.height, 0, 0, blobs.width, blobs.height); | ||
+ | // blur the blob image | ||
+ | blobs.filter(BLUR); | ||
+ | // detect the blobs | ||
+ | theBlobDetection.computeBlobs(blobs.pixels); | ||
+ | // clear the polygon (original functionality) | ||
+ | poly.reset(); | ||
+ | // create the polygon from the blobs (custom functionality, see class) | ||
+ | poly.createPolygon(); | ||
+ | drawFlowfield(); | ||
+ | } | ||
+ | |||
+ | void setupFlowfield() { | ||
+ | // set stroke weight (for particle display) to 2.5 | ||
+ | strokeWeight(2.5); | ||
+ | // initialize all particles in the flow | ||
+ | for(int i=0; i<flow.length; i++) { | ||
+ | flow[i] = new Particle(i/10000.0); | ||
+ | } | ||
+ | // set all colors randomly now | ||
+ | setRandomColors(1); | ||
+ | } | ||
+ | |||
+ | void drawFlowfield() { | ||
+ | // center and reScale from Kinect to custom dimensions | ||
+ | translate(0, (height-kinectHeight*reScale)/2); | ||
+ | scale(reScale); | ||
+ | // set global variables that influence the particle flow's movement | ||
+ | globalX = noise(frameCount * 0.01) * width/2 + width/4; | ||
+ | globalY = noise(frameCount * 0.005 + 5) * height; | ||
+ | // update and display all particles in the flow | ||
+ | for (Particle p : flow) { | ||
+ | p.updateAndDisplay(); | ||
+ | } | ||
+ | // set the colors randomly every 240th frame | ||
+ | setRandomColors(240); | ||
+ | } | ||
+ | |||
+ | // sets the colors every nth frame | ||
+ | void setRandomColors(int nthFrame) { | ||
+ | if (frameCount % nthFrame == 0) { | ||
+ | // turn a palette into a series of strings | ||
+ | String[] paletteStrings = split(palettes[int(random(palettes.length))], ","); | ||
+ | // turn strings into colors | ||
+ | color[] colorPalette = new color[paletteStrings.length]; | ||
+ | for (int i=0; i<paletteStrings.length; i++) { | ||
+ | colorPalette[i] = int(paletteStrings[i]); | ||
+ | } | ||
+ | // set background color to first color from palette | ||
+ | bgColor = colorPalette[0]; | ||
+ | // set all particle colors randomly to color from palette (excluding first aka background color) | ||
+ | for (int i=0; i<flow.length; i++) { | ||
+ | flow[i].col = colorPalette[int(random(1, colorPalette.length))]; | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | </code> | ||
+ | |||
+ | Die Particle Klasse | ||
+ | |||
+ | <code java> | ||
+ | // a basic noise-based moving particle | ||
+ | class Particle { | ||
+ | // unique id, (previous) position, speed | ||
+ | float id, x, y, xp, yp, s, d; | ||
+ | color col; // color | ||
+ | |||
+ | Particle(float id) { | ||
+ | this.id = id; | ||
+ | s = random(2, 6); // speed | ||
+ | } | ||
+ | |||
+ | void updateAndDisplay() { | ||
+ | // let it flow, end with a new x and y position | ||
+ | id += 0.01; | ||
+ | d = (noise(id, x/globalY, y/globalY)-0.5)*globalX; | ||
+ | x += cos(radians(d))*s; | ||
+ | y += sin(radians(d))*s; | ||
+ | |||
+ | // constrain to boundaries | ||
+ | if (x<-10) x=xp=kinectWidth+10; | ||
+ | if (x>kinectWidth+10) x=xp=-10; | ||
+ | if (y<-10) y=yp=kinectHeight+10; | ||
+ | if (y>kinectHeight+10) y=yp=-10; | ||
+ | |||
+ | // if there is a polygon (more than 0 points) | ||
+ | if (poly.npoints > 0) { | ||
+ | // if this particle is outside the polygon | ||
+ | if (!poly.contains(x, y)) { | ||
+ | // while it is outside the polygon | ||
+ | while(!poly.contains(x, y)) { | ||
+ | // randomize x and y | ||
+ | x = random(kinectWidth); | ||
+ | y = random(kinectHeight); | ||
+ | } | ||
+ | // set previous x and y, to this x and y | ||
+ | xp=x; | ||
+ | yp=y; | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // individual particle color | ||
+ | stroke(col); | ||
+ | // line from previous to current position | ||
+ | line(xp, yp, x, y); | ||
+ | |||
+ | // set previous to current position | ||
+ | xp=x; | ||
+ | yp=y; | ||
+ | } | ||
+ | } | ||
+ | </code> | ||
+ | |||
+ | Die PolygonBlob Klasse | ||
+ | |||
+ | <code java> | ||
+ | // an extended polygon class with my own customized createPolygon() method (feel free to improve!) | ||
+ | import java.util.Collections; | ||
+ | class PolygonBlob extends Polygon { | ||
+ | |||
+ | // took me some time to make this method fully self-sufficient | ||
+ | // now it works quite well in creating a correct polygon from a person's blob | ||
+ | // of course many thanks to v3ga, because the library already does a lot of the work | ||
+ | void createPolygon() { | ||
+ | // an arrayList... of arrayLists... of PVectors | ||
+ | // the arrayLists of PVectors are basically the person's contours (almost but not completely in a polygon-correct order) | ||
+ | ArrayList<ArrayList<PVector>> contours = new ArrayList<ArrayList<PVector>>(); | ||
+ | // helpful variables to keep track of the selected contour and point (start/end point) | ||
+ | int selectedContour = 0; | ||
+ | int selectedPoint = 0; | ||
+ | |||
+ | // create contours from blobs | ||
+ | // go over all the detected blobs | ||
+ | for (int n=0 ; n<theBlobDetection.getBlobNb(); n++) { | ||
+ | Blob b = theBlobDetection.getBlob(n); | ||
+ | // for each substantial blob... | ||
+ | if (b != null && b.getEdgeNb() > 100) { | ||
+ | // create a new contour arrayList of PVectors | ||
+ | ArrayList<PVector> contour = new ArrayList<PVector>(); | ||
+ | // go over all the edges in the blob | ||
+ | for (int m=0; m<b.getEdgeNb(); m++) { | ||
+ | // get the edgeVertices of the edge | ||
+ | EdgeVertex eA = b.getEdgeVertexA(m); | ||
+ | EdgeVertex eB = b.getEdgeVertexB(m); | ||
+ | // if both ain't null... | ||
+ | if (eA != null && eB != null) { | ||
+ | // get next and previous edgeVertexA | ||
+ | EdgeVertex fn = b.getEdgeVertexA((m+1) % b.getEdgeNb()); | ||
+ | EdgeVertex fp = b.getEdgeVertexA((max(0, m-1))); | ||
+ | // calculate distance between vertexA and next and previous edgeVertexA respectively | ||
+ | // positions are multiplied by kinect dimensions because the blob library returns normalized values | ||
+ | float dn = dist(eA.x*kinectWidth, eA.y*kinectHeight, fn.x*kinectWidth, fn.y*kinectHeight); | ||
+ | float dp = dist(eA.x*kinectWidth, eA.y*kinectHeight, fp.x*kinectWidth, fp.y*kinectHeight); | ||
+ | // if either distance is bigger than 15 | ||
+ | if (dn > 15 || dp > 15) { | ||
+ | // if the current contour size is bigger than zero | ||
+ | if (contour.size() > 0) { | ||
+ | // add final point | ||
+ | contour.add(new PVector(eB.x*kinectWidth, eB.y*kinectHeight)); | ||
+ | // add current contour to the arrayList | ||
+ | contours.add(contour); | ||
+ | // start a new contour arrayList | ||
+ | contour = new ArrayList<PVector>(); | ||
+ | // if the current contour size is 0 (aka it's a new list) | ||
+ | } else { | ||
+ | // add the point to the list | ||
+ | contour.add(new PVector(eA.x*kinectWidth, eA.y*kinectHeight)); | ||
+ | } | ||
+ | // if both distance are smaller than 15 (aka the points are close) | ||
+ | } else { | ||
+ | // add the point to the list | ||
+ | contour.add(new PVector(eA.x*kinectWidth, eA.y*kinectHeight)); | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // at this point in the code we have a list of contours (aka an arrayList of arrayLists of PVectors) | ||
+ | // now we need to sort those contours into a correct polygon. To do this we need two things: | ||
+ | // 1. The correct order of contours | ||
+ | // 2. The correct direction of each contour | ||
+ | |||
+ | // as long as there are contours left... | ||
+ | while (contours.size() > 0) { | ||
+ | |||
+ | // find next contour | ||
+ | float distance = 999999999; | ||
+ | // if there are already points in the polygon | ||
+ | if (npoints > 0) { | ||
+ | // use the polygon's last point as a starting point | ||
+ | PVector lastPoint = new PVector(xpoints[npoints-1], ypoints[npoints-1]); | ||
+ | // go over all contours | ||
+ | for (int i=0; i<contours.size(); i++) { | ||
+ | ArrayList<PVector> c = contours.get(i); | ||
+ | // get the contour's first point | ||
+ | PVector fp = c.get(0); | ||
+ | // get the contour's last point | ||
+ | PVector lp = c.get(c.size()-1); | ||
+ | // if the distance between the current contour's first point and the polygon's last point is smaller than distance | ||
+ | if (fp.dist(lastPoint) < distance) { | ||
+ | // set distance to this distance | ||
+ | distance = fp.dist(lastPoint); | ||
+ | // set this as the selected contour | ||
+ | selectedContour = i; | ||
+ | // set selectedPoint to 0 (which signals first point) | ||
+ | selectedPoint = 0; | ||
+ | } | ||
+ | // if the distance between the current contour's last point and the polygon's last point is smaller than distance | ||
+ | if (lp.dist(lastPoint) < distance) { | ||
+ | // set distance to this distance | ||
+ | distance = lp.dist(lastPoint); | ||
+ | // set this as the selected contour | ||
+ | selectedContour = i; | ||
+ | // set selectedPoint to 1 (which signals last point) | ||
+ | selectedPoint = 1; | ||
+ | } | ||
+ | } | ||
+ | // if the polygon is still empty | ||
+ | } else { | ||
+ | // use a starting point in the lower-right | ||
+ | PVector closestPoint = new PVector(width, height); | ||
+ | // go over all contours | ||
+ | for (int i=0; i<contours.size(); i++) { | ||
+ | ArrayList<PVector> c = contours.get(i); | ||
+ | // get the contour's first point | ||
+ | PVector fp = c.get(0); | ||
+ | // get the contour's last point | ||
+ | PVector lp = c.get(c.size()-1); | ||
+ | // if the first point is in the lowest 5 pixels of the (kinect) screen and more to the left than the current closestPoint | ||
+ | if (fp.y > kinectHeight-5 && fp.x < closestPoint.x) { | ||
+ | // set closestPoint to first point | ||
+ | closestPoint = fp; | ||
+ | // set this as the selected contour | ||
+ | selectedContour = i; | ||
+ | // set selectedPoint to 0 (which signals first point) | ||
+ | selectedPoint = 0; | ||
+ | } | ||
+ | // if the last point is in the lowest 5 pixels of the (kinect) screen and more to the left than the current closestPoint | ||
+ | if (lp.y > kinectHeight-5 && lp.x < closestPoint.y) { | ||
+ | // set closestPoint to last point | ||
+ | closestPoint = lp; | ||
+ | // set this as the selected contour | ||
+ | selectedContour = i; | ||
+ | // set selectedPoint to 1 (which signals last point) | ||
+ | selectedPoint = 1; | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | |||
+ | // add contour to polygon | ||
+ | ArrayList<PVector> contour = contours.get(selectedContour); | ||
+ | // if selectedPoint is bigger than zero (aka last point) then reverse the arrayList of points | ||
+ | if (selectedPoint > 0) { Collections.reverse(contour); } | ||
+ | // add all the points in the contour to the polygon | ||
+ | for (PVector p : contour) { | ||
+ | addPoint(int(p.x), int(p.y)); | ||
+ | } | ||
+ | // remove this contour from the list of contours | ||
+ | contours.remove(selectedContour); | ||
+ | // the while loop above makes all of this code loop until the number of contours is zero | ||
+ | // at that time all the points in all the contours have been added to the polygon... in the correct order (hopefully) | ||
+ | } | ||
+ | } | ||
+ | } | ||
+ | </code> | ||