Here is a Pygame sprite animation using the approach presented by Joe Wreschnig and Nicolas Crovatti. It's not exactly what I need yet, but it's quite suitable.
import pygame, random
from pygame.locals import *

class Char(pygame.sprite.Sprite):
    x, y = (100, 0)

    def __init__(self, img, frames=1, modes=1, w=32, h=32, fps=3):
        pygame.sprite.Sprite.__init__(self)
        original_width, original_height = img.get_size()
        self._w = w
        self._h = h
        self._framelist = []
        for i in range(int(original_width / w)):
            self._framelist.append(img.subsurface((i * w, 0, w, h)))
        self.image = self._framelist[0]
        self._start = pygame.time.get_ticks()
        self._delay = 1000 / fps
        self._last_update = 0
        self._frame = 0
        self.update(pygame.time.get_ticks(), 100, 100)

    def set_pos(self, x, y):
        self.x = x
        self.y = y

    def get_pos(self):
        return (self.x, self.y)

    def update(self, t, width, height):
        # position: fall one pixel per update; when the sprite leaves the
        # bottom of the screen, respawn it above the top at a random x
        self.y += 1
        if self.y > height:
            self.x = random.randint(0, width - self._w)
            self.y = -self._h
        # animation: advance to the next frame when the delay has elapsed
        if t - self._last_update > self._delay:
            self._frame += 1
            if self._frame >= len(self._framelist):
                self._frame = 0
            self.image = self._framelist[self._frame]
            self._last_update = t

SCREEN_W, SCREEN_H = (320, 320)

def main():
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_W, SCREEN_H))
    background = pygame.image.load("field.png")
    img_orc = pygame.image.load("orc.png")
    orc = Char(img_orc, 4, 1, 32, 48)
    while pygame.event.poll().type != KEYDOWN:
        screen.blit(background, (0, 0))
        screen.blit(orc.image, orc.get_pos())
        orc.update(pygame.time.get_ticks(), SCREEN_W, SCREEN_H)
        pygame.display.update()
        pygame.time.delay(10)

if __name__ == '__main__': main()
Here it is working:
Update: I put this source code and the images in the OpenPixel project on GitHub.
Here's a simple key-handling example in Pygame where you move a circle using the keyboard.
import pygame
from pygame.locals import *

def main():
    x, y = (100, 100)
    pygame.init()
    screen = pygame.display.set_mode((400, 400))
    while 1:
        pygame.time.delay(1000 // 60)  # cap the loop at roughly 60 updates per second
        # exit handling
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                return
        # key handling
        key = pygame.key.get_pressed()
        if key[K_LEFT]:
            x -= 1
        if key[K_RIGHT]:
            x += 1
        if key[K_UP]:
            y -= 1
        if key[K_DOWN]:
            y += 1
        # fill the background white and draw a black circle
        screen.fill((255, 255, 255))
        pygame.draw.circle(screen, (0, 0, 0), [x, y], 30)
        pygame.display.flip()

if __name__ == '__main__': main()
Here’s a video of it working:
The function pygame.key.get_pressed returns a sequence of boolean values representing the state of every key on the keyboard. It's very useful because on other game platforms I usually have to build this myself.
This approach allows me to handle more than one key at a time. For example, the left and up keys can be pressed together and each one is handled separately, creating diagonal movement.
Here's a simple OpenCV example that separates an image into its hue, saturation and value channels.
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

int main( int argc, char **argv ){
    IplImage *img, *hsv, *hue, *sat, *val;
    int key = 0, depth;
    CvSize size;

    /* Load the image passed on the command line, check it. */
    if (argc>1) {
        img = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
        if(!img){
            printf("Could not open image.");
            return 1;
        }
        if(img->nChannels!=3){
            printf("We need a color image!");
            return 1;
        }
    } else {
        printf("Usage: %s IMAGE_FILE\n", argv[0]);
        return 1;
    }

    /* Create a hsv image with 3 channels and hue, sat and val with 1 channel.
       All with the same size. */
    size = cvGetSize(img);
    depth = img->depth;
    hue = cvCreateImage(size, depth, 1);
    sat = cvCreateImage(size, depth, 1);
    val = cvCreateImage(size, depth, 1);
    hsv = cvCreateImage(size, depth, 3);
    cvZero(hue);
    cvZero(sat);
    cvZero(val);
    cvZero(hsv);

    /* Convert from Red-Green-Blue to Hue-Saturation-Value */
    cvCvtColor( img, hsv, CV_BGR2HSV );

    /* Split hsv into its hue, saturation and value channels */
    cvSplit(hsv, hue, sat, val, 0);

    /* Create windows, display them, wait for a key */
    cvNamedWindow("original", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("hue", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("saturation", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("value", CV_WINDOW_AUTOSIZE);
    cvShowImage("original", img);
    cvShowImage("hue", hue);
    cvShowImage("saturation", sat);
    cvShowImage("value", val);
    cvWaitKey(0);

    /* Free memory and get out */
    cvDestroyWindow("original");
    cvDestroyWindow("hue");
    cvDestroyWindow("saturation");
    cvDestroyWindow("value");
    cvReleaseImage(&img);
    cvReleaseImage(&hsv);
    cvReleaseImage(&hue);
    cvReleaseImage(&sat);
    cvReleaseImage(&val);
    return 0;
}
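To compile and run it in a well configured OpenCV development environment (the hsv.c and image.jpg names here are just an assumption; use whatever you saved the source as and any color image):
gcc hsv.c -o hsv `pkg-config opencv --libs --cflags`
./hsv image.jpg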
Resized original image, photo by Robert Bradshaw at Wikimedia Commons.
Hue channel:
Saturation channel:
Value channel:
This is a very simple example of how to open two images and display them added together.
I took two pictures from Wikimedia Commons that were highlighted as Featured Pictures. I cropped both to the same size, as I'm trying to make this example as simple as possible.
The first one is a photo of our Milky Way, taken at Paranal Observatory by Stéphane Guisard.
The second one is a California surfer inside a wave, taken by Mila Zinkova.
In the simple OpenCV code below, we open the images, create a new one to hold the result and use cvAdd to add them. We do not save the result or handle anything beyond the ordinary case of two images with the same size.
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

int main( int argc, char **argv ){
    IplImage *surfer, *milkyway, *result;
    int key = 0;
    CvSize size;

    /* load images, check them, get the size (both should have the same) */
    surfer = cvLoadImage("surfer.jpg", CV_LOAD_IMAGE_COLOR);
    milkyway = cvLoadImage("milkyway.jpg", CV_LOAD_IMAGE_COLOR);
    if((!surfer)||(!milkyway)){
        printf("Could not open one or more images.");
        return 1;
    }
    size = cvGetSize(surfer);

    /* create an empty image with the same size, depth and channels as the others */
    result = cvCreateImage(size, surfer->depth, surfer->nChannels);
    cvZero(result);

    /* result = surfer + milkyway (NULL mask) */
    cvAdd(surfer, milkyway, result, NULL);

    /* create a window, display the result, wait for a key */
    cvNamedWindow("example", CV_WINDOW_AUTOSIZE);
    cvShowImage("example", result);
    cvWaitKey(0);

    /* free memory and get out */
    cvDestroyWindow("example");
    cvReleaseImage(&surfer);
    cvReleaseImage(&milkyway);
    cvReleaseImage(&result);
    return 0;
}
/* gcc add.c -o add `pkg-config opencv --libs --cflags` */
Compile it (in a well configured OpenCV development environment) and run it:
gcc add.c -o add `pkg-config opencv --libs --cflags`
./add
The result came out pretty cool: a Milky Way surfer.
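The example only displays the result; if you also want to write it to disk, OpenCV's cvSaveImage does it in one line (the result.jpg name here is just an illustration, not part of the original example):
cvSaveImage("result.jpg", result); /* format is inferred from the file extension */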
This is a simple example of how to run edge detection on a video using OpenCV. It uses OpenCV's built-in Canny edge detector algorithm.
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

int main(int argc, char *argv[]) {
    int delay = 0, key = 0;
    char *window_name;
    CvCapture *video = NULL;
    IplImage *frame = NULL;
    IplImage *grey = NULL;
    IplImage *edges = NULL;

    /* check for a video file passed on the command line */
    if (argc>1) {
        video = cvCaptureFromFile(argv[1]);
    } else {
        printf("Usage: %s VIDEO_FILE\n", argv[0]);
        return 1;
    }

    /* check that the file was correctly opened */
    if (!video) {
        printf("Unable to open \"%s\"\n", argv[1]);
        return 1;
    }

    /* create a video window with the same name as the video file, auto sized */
    window_name = argv[1];
    cvNamedWindow(window_name, CV_WINDOW_AUTOSIZE);

    /* get the first frame and create grayscale and edges images with the same size */
    frame = cvQueryFrame(video);
    grey = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    edges = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);

    /* calculate the delay between each frame and display the video's FPS */
    printf("%2.2f FPS\n", cvGetCaptureProperty(video, CV_CAP_PROP_FPS));
    delay = (int) (1000/cvGetCaptureProperty(video, CV_CAP_PROP_FPS));

    while (frame) {
        /* Detect edges on the grayscale image (Canny needs a single-channel
           input) using two hysteresis thresholds and an aperture parameter
           for the Sobel operator. */
        cvCvtColor(frame, grey, CV_BGR2GRAY);
        cvCanny( grey, edges, 1.0, 1.0, 3);

        /* show the edges of the loaded frame */
        cvShowImage(window_name, edges);

        /* load the next frame; a NULL frame means the video has ended */
        frame = cvQueryFrame(video);
        if(!frame) break;

        /* wait the inter-frame delay and check for the quit key */
        key = cvWaitKey(delay);
        if(key=='q') break;
    }

    /* free memory and get out (frames from cvQueryFrame are owned by the capture) */
    cvReleaseImage(&grey);
    cvReleaseImage(&edges);
    cvReleaseCapture(&video);
    cvDestroyWindow(window_name);
    return 0;
}
To compile it in a well configured OpenCV development environment:
gcc edgeplayer.c -o edgeplayer `pkg-config opencv --libs --cflags`
To run it, call edgeplayer with the name of a video:
./edgeplayer rick.avi
The result is something similar to this:
humanidade, originally uploaded by Silveira Neto.
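The two 1.0 thresholds passed to cvCanny above keep almost every edge, which makes the output quite noisy. The thresholds control the hysteresis step: pixels above the high threshold start an edge, and pixels between the two thresholds only extend an existing one. A hedged variant with values I would try first (my suggestion, not from the original example):
cvCanny(grey, edges, 50.0, 150.0, 3); /* low threshold 50, high threshold 150, 3x3 Sobel aperture */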
Here's a simple video player that also performs face detection through the Open Computer Vision Library.
The code was developed using code from nashruddin.com and samples from OpenCV, including the Haar classifier XML. A more detailed explanation of the theory behind the OpenCV face detection algorithm can be found here.
The code:
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

CvHaarClassifierCascade *cascade;
CvMemStorage *storage;

int main(int argc, char *argv[]) {
    CvCapture *video = NULL;
    IplImage *frame = NULL;
    int delay = 0, key, i = 0;
    char *window_name = "Video";
    char *cascadefile = "haarcascade_frontalface_alt.xml";

    /* check for a video file passed on the command line */
    if (argc>1) {
        video = cvCaptureFromFile(argv[1]);
    }
    else {
        printf("Usage: %s VIDEO_FILE\n", argv[0]);
        return 1;
    }

    /* check that the file was correctly opened */
    if (!video) {
        printf("Unable to open \"%s\"\n", argv[1]);
        return 1;
    }

    /* load the classifier */
    cascade = ( CvHaarClassifierCascade* )cvLoad( cascadefile, 0, 0, 0 );
    if(!cascade){
        printf("Error loading the classifier.");
        return 1;
    }

    /* set up the memory buffer for the face detector */
    storage = cvCreateMemStorage( 0 );
    if(!storage){
        printf("Error creating the memory storage.");
        return 1;
    }

    /* create a video window, auto sized */
    cvNamedWindow(window_name, CV_WINDOW_AUTOSIZE);

    /* get a frame; necessary to use cvGetCaptureProperty */
    frame = cvQueryFrame(video);

    /* calculate the delay between each frame and display the video's FPS */
    printf("%2.2f FPS\n", cvGetCaptureProperty(video, CV_CAP_PROP_FPS));
    delay = (int) (1000/cvGetCaptureProperty(video, CV_CAP_PROP_FPS));

    while (frame) {
        /* show the loaded frame */
        cvShowImage(window_name, frame);

        /* wait the inter-frame delay and check for the quit key */
        key = cvWaitKey(delay);
        if(key=='q') break;

        /* load the next frame; a NULL frame means the video has ended */
        frame = cvQueryFrame(video);
        if(!frame) break;

        /* detect faces; clear the storage so it doesn't grow every frame */
        cvClearMemStorage(storage);
        CvSeq *faces = cvHaarDetectObjects(
            frame,   /* image to detect objects in */
            cascade, /* haar classifier cascade */
            storage, /* resultant sequence of the object candidate rectangles */
            1.1,     /* increase window by 10% between the subsequent scans */
            3,       /* 3 neighbors make up an object */
            0,       /* flags, e.g. CV_HAAR_DO_CANNY_PRUNING */
            cvSize( 40, 40 )
        );

        /* for each face found, draw a red box */
        for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
            CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
            cvRectangle( frame,
                cvPoint( r->x, r->y ),
                cvPoint( r->x + r->width, r->y + r->height ),
                CV_RGB( 255, 0, 0 ), 1, 8, 0 );
        }
    }

    /* free memory and get out */
    cvReleaseCapture(&video);
    cvReleaseMemStorage(&storage);
    cvDestroyWindow(window_name);
    return 0;
}
Yeah, I know the code needs a few adjustments. ¬¬
To compile it in a well configured OpenCV development environment:
gcc faceplayer.c -o faceplayer `pkg-config opencv --libs --cflags`
To run it, you have to put the XML classifier (haarcascade_frontalface_alt.xml), which comes with the OpenCV sources at OpenCV-2.0.0/data/haarcascades/, in the same directory as the binary. And so:
./faceplayer video.avi
The results I've got so far show that it works well for faces, but sometimes it also detects things that aren't faces. And here's a video of it working live.
An example of a good result:
An example of a bad result:
Maybe with some adjustments it could perform even better, but it was really easy to create using OpenCV. For instance, see the hedged tweak below.
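A sketch of a stricter cvHaarDetectObjects call (these values are my suggestion, not from the original code): enabling Canny pruning skips regions with too few edges, requiring more neighbors discards isolated detections, and a larger minimum window ignores small false positives.
CvSeq *faces = cvHaarDetectObjects(
    frame,                    /* image to detect objects in */
    cascade,                  /* haar classifier cascade */
    storage,                  /* memory buffer for the candidates */
    1.2,                      /* grow the scan window by 20% between scans */
    5,                        /* require 5 overlapping neighbors per object */
    CV_HAAR_DO_CANNY_PRUNING, /* prune edge-poor regions early */
    cvSize(60, 60)            /* ignore windows smaller than 60x60 pixels */
);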
I found this useful tip about how to convert videos to watch on the Nokia N800 using MEncoder.
mencoder input.ogg -vf scale=400:240 -oac mp3lame -ovc lavc -o output.avi
It converts a file called input.ogg to an AVI file output.avi with width 400 and height 240 (the device resolution is 800×480), using the mp3lame audio codec and libavcodec video codec.
As this has become a daily operation for me, I created this simple script called 2n800:
#!/bin/sh
if [ $# -ge 1 ];
then
    mencoder "$1" -vf scale=400:240 -oac mp3lame -ovc lavc -o "${1%.*}.avi"
else
    echo Usage:
    echo "\t$0 FILE"
fi
It transforms the first parameter, like something.flv, into something.avi. If you make this script executable and put it on your path, like in /usr/bin/, you can easily call the command 2n800 followed by the name of the video you want to convert. If it is readable by MPlayer, it will be converted.
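For example (assuming you saved the script as /usr/bin/2n800; myvideo.flv is any video MPlayer can read):
chmod +x /usr/bin/2n800
2n800 myvideo.flv
This produces myvideo.avi next to the original file.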
After you've converted your video and sent it to your N800, you can watch it with MPlayer for Maemo. The result is perfect.
An amazing video showing DNA synthesis working as a Turing-machine-like process.