Benutzer-Werkzeuge

Webseiten-Werkzeuge


skriptsose2020:code-beispiele_aus_den_vorlesungnsvideos_sose2020

Unterschiede

Hier werden die Unterschiede zwischen zwei Versionen gezeigt.

Link zu dieser Vergleichsansicht

Beide Seiten der vorigen Revision Vorhergehende Überarbeitung
Nächste Überarbeitung
Vorhergehende Überarbeitung
skriptsose2020:code-beispiele_aus_den_vorlesungnsvideos_sose2020 [2020/05/13 15:10]
d.golovko
skriptsose2020:code-beispiele_aus_den_vorlesungnsvideos_sose2020 [2020/06/18 01:10] (aktuell)
d.golovko
Zeile 337: Zeile 337:
   robot2.move(2);​   robot2.move(2);​
   float distance = robot1.calculateDistance(robot2);​   float distance = robot1.calculateDistance(robot2);​
-  if (distance <= 0) { // dies ist anders als im Vorlesungs-Video,​ weil die Radienlängen in der Methode calculateDistance() abgezogen werden+  if (distance <= 0) {  
 +  ​// dies ist anders als im Vorlesungs-Video,​ weil die Radienlängen in der Methode calculateDistance() abgezogen werden
     robot1.turn(PI);​     robot1.turn(PI);​
     robot2.turn(PI);​     robot2.turn(PI);​
Zeile 392: Zeile 393:
 </​hidden>​ </​hidden>​
 \\ \\
 +
 +===Woche 4: Bibliotheken===
 +{{anchor:​woche4}}
 +
 +<hidden Video-Bibliothek:​ Abzug des Hintergrunds>​
 +<code java>
 +/**
 + * Background Subtraction
 + * by Golan Levin.
 + *
 + * Detect the presence of people and objects in the frame using a simple
 + * background-subtraction technique. To initialize the background, press a key.
 + */
 +
 +
 +import processing.video.*;​
 +
 +int numPixels;
 +int[] backgroundPixels;​
 +Capture video;
 +
 +void setup() {
 +  size(640, 480); 
 +  ​
 +  // This is the default video input; see the GettingStartedCapture
 +  // example if it creates an error
 +  //video = new Capture(this,​ 160, 120);
 +  String[] cameras = Capture.list();​
 +  video = new Capture(this,​ cameras[0]);​
 +  ​
 +  // Start capturing the images from the camera
 +  video.start();  ​
 +  ​
 +  numPixels = video.width * video.height;​
 +  // Create array to store the background image
 +  backgroundPixels = new int[numPixels];​
 +  // Make the pixels[] array available for direct manipulation
 +  loadPixels();​
 +}
 +
 +void draw() {
 +  if (video.available()) {
 +    video.read();​ // Read a new video frame
 +    video.loadPixels();​ // Make the pixels of video available
 +    // Difference between the current frame and the stored background
 +    int presenceSum = 0;
 +    for (int i = 0; i < numPixels; i++) { // For each pixel in the video frame...
 +      // Fetch the current color in that location, and also the color
 +      // of the background in that spot
 +      color currColor = video.pixels[i];​
 +      color bkgdColor = backgroundPixels[i];​
 +      // Extract the red, green, and blue components of the current pixel's color
 +      int currR = (currColor >> 16) & 0xFF;
 +      int currG = (currColor >> 8) & 0xFF;
 +      int currB = currColor & 0xFF;
 +      // Extract the red, green, and blue components of the background pixel's color
 +      int bkgdR = (bkgdColor >> 16) & 0xFF;
 +      int bkgdG = (bkgdColor >> 8) & 0xFF;
 +      int bkgdB = bkgdColor & 0xFF;
 +      // Compute the difference of the red, green, and blue values
 +      int diffR = abs(currR - bkgdR);
 +      int diffG = abs(currG - bkgdG);
 +      int diffB = abs(currB - bkgdB);
 +      // Add these differences to the running tally
 +      presenceSum += diffR + diffG + diffB;
 +      // Render the difference image to the screen
 +      pixels[i] = color(diffR,​ diffG, diffB);
 +      // The following line does the same thing much faster, but is more technical
 +      //pixels[i] = 0xFF000000 | (diffR << 16) | (diffG << 8) | diffB;
 +    }
 +    updatePixels();​ // Notify that the pixels[] array has changed
 +    println(presenceSum);​ // Print out the total amount of movement
 +  }
 +}
 +
 +// When a key is pressed, capture the background image into the backgroundPixels
 +// buffer, by copying each of the current frame's pixels into it.
 +void keyPressed() {
 +  video.loadPixels();​
 +  arraycopy(video.pixels,​ backgroundPixels);​
 +}
 +</​code>​
 +</​hidden>​
 +
 +<hidden OpenCV: Gesichtserkennung>​
 +<code java>
 +import gab.opencv.*;​
 +import java.awt.Rectangle;​
 +import processing.video.*;​
 +
 +OpenCV opencv;
 +Rectangle[] faces;
 +Capture video;
 +
 +void setup() {
 +  size(640, 480);
 +  video = new Capture(this,​ Capture.list()[0]);​
 +  opencv = new OpenCV(this,​ width, height);
 +  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);  ​
 +  ​
 +  video.start();​
 +}
 +
 +void draw() {
 +  video.read();​
 +  opencv.loadImage(video);​
 +  ​
 +  image(video,​ 0, 0); // display a video frame on the drawing canvas
 +
 +  noFill();
 +  stroke(0, 255, 0);
 +  strokeWeight(3);​
 +  Rectangle[] faces = opencv.detect();​
 +  for (int i = 0; i < faces.length;​ i++) {
 +    rect(faces[i].x,​ faces[i].y, faces[i].width,​ faces[i].height);​
 +  }
 +}
 +</​code>​
 +</​hidden>​
 +
 +<hidden Bildverarbeitung:​ Kantendetektion>​
 +<code java>
 +import gab.opencv.*;​
 +import processing.video.*;​
 +
 +OpenCV opencv;
 +Capture video;
 +
 +void setup() {
 +  ​
 +  size(640, 480);
 +  video = new Capture(this,​ Capture.list()[0]);​
 +  opencv = new OpenCV(this,​ width, height);
 +  video.start();​
 +
 +}
 +
 +void draw() {
 +  video.read();​ // read current video frame 
 +  opencv.loadImage(video);​ // and load it into the opencv object
 +  opencv.findCannyEdges(20,​ 75); // find edges
 +  PImage canny = opencv.getSnapshot();​ // get the black-and-white resulting image
 +  image(canny,​ 0, 0); // display the image on the drawing canvas
 +}
 +</​code>​
 +</​hidden>​
 +
 +<hidden Bildverarbeitung:​ Hough-Transformation>​
 +<code java>
 +import gab.opencv.*;​
 +import processing.video.*;​
 +
 +OpenCV opencv;
 +Capture video;
 +
 +void setup() {
 +
 +  size(640, 480);
 +  video = new Capture(this,​ Capture.list()[0]);​
 +  opencv = new OpenCV(this,​ width, height);
 +  video.start();​
 +}
 +
 +void draw() {
 +  video.read();​ // read current video frame 
 +  opencv.loadImage(video);​ // and load it into the opencv object
 +  opencv.findCannyEdges(20,​ 75); // find edges
 +  opencv.getSnapshot();​ // get the black-and-white resulting image
 +  image(video,​ 0, 0); // display the video frame on the drawing canvas
 +
 +  // Find lines with Hough line detection
 +  // Arguments are: threshold, minLength, maxLineGap
 +  ArrayList<​Line>​ lines = opencv.findLines(100,​ 30, 20);
 +
 +  for (Line line : lines) {
 +    // lines include angle in radians, measured in double precision
 +    // so we can select out vertical and horizontal lines
 +    // They also include "​start"​ and "​end"​ PVectors with the position
 +    if (line.angle >= radians(0) && line.angle < radians(1)) {
 +      stroke(0, 255, 0);
 +      line(line.start.x,​ line.start.y,​ line.end.x, line.end.y);​
 +    }
 +
 +    if (line.angle > radians(89) && line.angle < radians(91)) {
 +      stroke(255, 0, 0);
 +      line(line.start.x,​ line.start.y,​ line.end.x, line.end.y);​
 +    }
 +  }
 +}
 +</​code>​
 +</​hidden>​
 +
skriptsose2020/code-beispiele_aus_den_vorlesungnsvideos_sose2020.1589375443.txt.gz · Zuletzt geändert: 2020/05/13 15:10 von d.golovko