c++ - CUDA - median filter implementation does not produce desired results

I have been trying to implement the median filter algorithm presented in the Wikipedia article: http://en.wikipedia.org/wiki/Median_filter#2D_median_filter_pseudo_code

To the best of my knowledge, what I have implemented is correct. However, when I view the results, I cannot get an output similar to the one produced by the median blur function in OpenCV. At the moment I am not concerned about speeding up my code with shared memory or texture memory; I would just like to get things working first. The size of my input image is 1024 x 256 pixels.

What am I doing wrong? Is there a thread leak in my code? I know that I should use shared memory to cut down on the global reads, because currently I am reading data from global memory a lot.

http://snag.gy/OkXzP.jpg -- the first image is the input, the second is my algorithm's result, and the third is the OpenCV medianBlur result. Ideally, I would like my algorithm to produce the same output as medianBlur.

This is all the code that I have written:

kernel implementation

#include "cuda.h"
#include "cuda_runtime_api.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "highgui.h"
//#include "opencv2/core/imgproc.hpp"
//#include "opencv2/core/gpu.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes, project

#include "cufft.h"
#include "cublas_v2.h"
#include "CUDA_wrapper.h"   // contains only func_prototype for function take_input()


// define the threads and grids for CUDA
#define BLOCK_ROWS 32
#define BLOCK_COLS 16

// define kernel dimensions
#define KERNEL_DIMENSION 3
#define MEDIAN_DIMENSION 3
#define MEDIAN_LENGTH 9

// this is the error checking part for CUDA
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
   if (code != cudaSuccess) 
   {
      fprintf(stderr,"GPUassert: %s %s %d
", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

// create two vars for the rows and cols of the image
    int d_imgRows;
    int d_imgCols; 

__global__ void FilterKernel (unsigned short *d_input_img, unsigned short *d_output_img, int d_iRows, int d_iCols)

{  
    unsigned short window[BLOCK_ROWS*BLOCK_COLS][KERNEL_DIMENSION*KERNEL_DIMENSION];

    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;   

    unsigned int tid = threadIdx.y*blockDim.y+threadIdx.x;

    if(x>d_iCols || y>d_iRows)
        return;

    window[tid][0]= (y==0||x==0) ? 0.0f : d_input_img[(y-1)*d_iCols+(x-1)];
    window[tid][1]= (y==0) ? 0.0f : d_input_img[(y-1)*d_iCols+x]; 
    window[tid][2]= (y==0||x==d_iCols-1) ? 0.0f : d_input_img[(y-1)*d_iCols+(x+1)];
    window[tid][3]= (x==0) ? 0.0f : d_input_img[y*d_iCols+(x-1)];
    window[tid][4]= d_input_img[y*d_iCols+x];
    window[tid][5]= (x==d_iCols-1) ? 0.0f : d_input_img[y*d_iCols+(x+1)];
    window[tid][6]= (y==d_iRows-1||x==0) ? 0.0f : d_input_img[(y+1)*d_iCols+(x-1)];
    window[tid][7]= (y==d_iRows-1) ? 0.0f : d_input_img[(y+1)*d_iCols+x];
    window[tid][8]= (y==d_iRows-1||x==d_iCols-1) ? 0.0f : d_input_img[(y+1)*d_iCols+(x+1)];

   __syncthreads();

    // Order elements 
    for (unsigned int j=0; j<9; ++j)
    {
        // Find position of minimum element
        int min=j;
        for (unsigned int l=j+1; l<9; ++l)
            if (window[tid][l] < window[tid][min])
                min=l;

        // Put found minimum element in its place
        const unsigned char temp=window[tid][j];
        window[tid][j]=window[tid][min];
        window[tid][min]=temp;

        __syncthreads();
    }

    d_output_img[y*d_iCols + x] = (window[tid][4]);

}

void take_input(const cv::Mat& input, const cv::Mat& output)
{

    unsigned short *device_input; 
    unsigned short *device_output;

    size_t d_ipimgSize = input.step * input.rows;
    size_t d_opimgSize = output.step * output.rows;

    gpuErrchk( cudaMalloc( (void**) &device_input, d_ipimgSize) ); 
    gpuErrchk( cudaMalloc( (void**) &device_output, d_opimgSize) ); 

    gpuErrchk( cudaMemcpy(device_input, input.data, d_ipimgSize, cudaMemcpyHostToDevice) );

    dim3 Threads(BLOCK_ROWS, BLOCK_COLS);  // 512 threads per block
    dim3 Blocks((input.cols + Threads.x - 1)/Threads.x, (input.rows + Threads.y - 1)/Threads.y);    

    //int check = (input.cols + Threads.x - 1)/Threads.x;
    //printf( "blockx %d", check);

    FilterKernel <<< Blocks, Threads >>> (device_input, device_output, input.rows, input.cols);

    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk( cudaMemcpy(output.data, device_output, d_opimgSize, cudaMemcpyDeviceToHost) );

    //printf( "num_rows_cuda %d", num_rows);
    //printf("
");

    gpuErrchk(cudaFree(device_input));
    gpuErrchk(cudaFree(device_output));

}

main function

#pragma once
#include<iostream>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/gpu/gpu.hpp>

#include <CUDA_wrapper.h>

using std::cout;
using std::endl;

int main()
{   

    //Read the image from harddisk, into a cv::Mat
    //IplImage *img=cvLoadImage("image.jpg");
    //cv::Mat input(img);
    cv::Mat input = cv::imread("C:/Users/OCT/Documents/Visual Studio 2008/Projects/MedianFilter/MedianFilter/pic1.bmp",CV_LOAD_IMAGE_GRAYSCALE);

    //IplImage* input = cvLoadImage("G:/Research/CUDA/Trials/OCTFilter/Debug/pic1.bmp");
    if(input.empty())
    {
        cout<<"Image Not Found"<<endl;
        getchar();
        return -1;
    }

    cv::Mat output(input.rows,input.cols,CV_8UC1);

    // store the different details of the input image like img_data, rows, cols in variables 
    int Rows = input.rows;
    int Cols = input.cols;
    unsigned char* Data = input.data;

    cout<<"image rows "<<Rows<<endl;
    cout<<"image cols "<<Cols<<endl;
    cout<<"
"<<endl;
    cout<<"data "<<(int)Data<<endl;
    cv::waitKey(0);

    // call the device function to take the image as input
    take_input(input, output);

    cv::Mat dest; 

    medianBlur ( input, dest, 3 );

    //Show the input and output
    cv::imshow("Input",input);
    cv::imshow("Output",output);
    cv::imshow("Median blur",dest);

    //Wait for key press
    cv::waitKey();
}
See Question&Answers more detail:os

与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…
Welcome To Ask or Share your Answers For Others

1 Reply


I believe there were a variety of errors and unnecessary complication in your "kernel implementation" file.

You may have better luck with the following:

$ cat t376.cu
#include <stdlib.h>
#include <stdio.h>

#define DCOLS 1024
#define DROWS 256

typedef struct {
  size_t step;
  size_t rows;
  size_t cols;
  unsigned char *data;
} mat;


// define the threads and grids for CUDA
#define BLOCK_ROWS 32
#define BLOCK_COLS 16

// define kernel dimensions
#define MEDIAN_LENGTH 9

// this is the error checking part for CUDA
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d
", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}


__global__ void FilterKernel (unsigned char *d_input_img, unsigned char *d_output_img, int d_iRows, int d_iCols)

{

    unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned char window[MEDIAN_LENGTH];

    if(col>=d_iCols || row>=d_iRows)
        return;

    window[0]= (row==0||col==0) ? 0 :                 d_input_img[(row-1)*d_iCols+(col-1)];
    window[1]= (row==0) ? 0 :                         d_input_img[(row-1)*d_iCols+col];
    window[2]= (row==0||col==d_iCols-1) ? 0 :         d_input_img[(row-1)*d_iCols+(col+1)];
    window[3]= (col==0) ? 0 :                         d_input_img[row*d_iCols+(col-1)];
    window[4]=                                        d_input_img[row*d_iCols+col];
    window[5]= (col==d_iCols-1) ? 0 :                 d_input_img[row*d_iCols+(col+1)];
    window[6]= (row==d_iRows-1||col==0) ? 0 :         d_input_img[(row+1)*d_iCols+(col-1)];
    window[7]= (row==d_iRows-1) ? 0 :                 d_input_img[(row+1)*d_iCols+col];
    window[8]= (row==d_iRows-1||col==d_iCols-1) ? 0 : d_input_img[(row+1)*d_iCols+(col+1)];

    // Order elements
    for (unsigned int j=0; j<5; ++j)
    {
        // Find position of minimum element
        unsigned char temp = window[j];
        unsigned int  idx  = j;
        for (unsigned int l=j+1; l<9; ++l)
            if (window[l] < temp){ idx=l; temp = window[l];}
        // Put found minimum element in its place
        window[idx] = window[j];
        window[j] = temp;
    }

    d_output_img[row*d_iCols + col] = (window[4]);

}

void take_input(const mat& input, const mat& output)
{

    unsigned char *device_input;
    unsigned char *device_output;

    size_t d_ipimgSize = input.step * input.rows;
    size_t d_opimgSize = output.step * output.rows;

    gpuErrchk( cudaMalloc( (void**) &device_input, d_ipimgSize) );
    gpuErrchk( cudaMalloc( (void**) &device_output, d_opimgSize) );

    gpuErrchk( cudaMemcpy(device_input, input.data, d_ipimgSize, cudaMemcpyHostToDevice) );

    dim3 Threads(BLOCK_COLS, BLOCK_ROWS);  // 512 threads per block
    dim3 Blocks((input.cols + Threads.x - 1)/Threads.x, (input.rows + Threads.y - 1)/Threads.y);

    //int check = (input.cols + Threads.x - 1)/Threads.x;
    //printf( "blockx %d", check);

    FilterKernel <<< Blocks, Threads >>> (device_input, device_output, input.rows, input.cols);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaGetLastError());

    gpuErrchk( cudaMemcpy(output.data, device_output, d_opimgSize, cudaMemcpyDeviceToHost) );

    //printf( "num_rows_cuda %d", num_rows);
    //printf("
");

    gpuErrchk(cudaFree(device_input));
    gpuErrchk(cudaFree(device_output));

}

int main(){
  mat input_im, output_im;
  input_im.rows  = DROWS;
  input_im.cols  = DCOLS;
  input_im.step  = input_im.cols;
  input_im.data  = (unsigned char *)malloc(input_im.step*input_im.rows);
  output_im.rows = DROWS;
  output_im.cols = DCOLS;
  output_im.step = input_im.cols;
  output_im.data = (unsigned char *)malloc(output_im.step*output_im.rows);

  for (int i = 0; i < DCOLS*DROWS; i++) {
    output_im.data[i] = 0;
    input_im.data[i] = 0;
    int temp = (i%DCOLS);
    if (temp == 5) input_im.data[i] = 20;
    if ((temp > 5) && (temp < 15)) input_im.data[i] = 40;
    if (temp == 15) input_im.data[i] = 20;
    }

  take_input(input_im, output_im);
  for (int i = 2*DCOLS; i < DCOLS*(DROWS-2); i++)
    if (input_im.data[i] != output_im.data[i]) {printf("mismatch at %d, input: %d, output: %d\n", i, (int)input_im.data[i], (int)output_im.data[i]); return 1;}
  printf("Success
");
  return 0;
}


$ nvcc -o t376 t376.cu
$ ./t376
Success
$

a few notes:

  1. I have tested this (not using OpenCV) for the simple case that I have injected in the code.
  2. Your usage of window was unnecessarily complex. Note that the way you have it, each thread will instantiate its own local copy of window independent of, and invisible to, the other threads. (perhaps you intended to use shared memory here? But I digress.)
  3. Your sorting routine was broken. I modified it to a version that I think will work.
  4. Replaced the pixel data types with unsigned char throughout.
  5. x and y were confusing, so I changed them to row and col, which seems clearer.
  6. Improved the kernel error checking slightly.
  7. There are many ways to optimize this. However, your sensible goal was to get something working correctly first, so I won't spend much time on optimizations beyond pointing out the two general areas of shared memory for reuse of the window data and an improved sorting routine (see the sketch after this list).
  8. You'll need to modify this appropriately for OpenCV.
  9. Note that if you modify it to DROWS = 1024 and DCOLS = 256, it still works.
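For the "improved sorting routine" half of note 7, here is a sketch only (it is not part of the tested code above): the well-known 19-exchange median-of-9 network, often seen as Devillard's opt_med9. It produces the median of the 9-element per-thread window with a fixed sequence of min/max exchanges instead of a data-dependent selection sort:

// Sketch: median of 9 via a fixed min/max exchange network.
// p points at the same 9-element per-thread window built in FilterKernel.
__device__ __forceinline__ unsigned char median9(unsigned char *p)
{
    // order p[a] and p[b] so that p[a] <= p[b]
    #define PIX_SORT(a,b) { unsigned char t = min(p[a], p[b]); \
                            p[b] = max(p[a], p[b]); p[a] = t; }
    PIX_SORT(1,2); PIX_SORT(4,5); PIX_SORT(7,8);
    PIX_SORT(0,1); PIX_SORT(3,4); PIX_SORT(6,7);
    PIX_SORT(1,2); PIX_SORT(4,5); PIX_SORT(7,8);
    PIX_SORT(0,3); PIX_SORT(5,8); PIX_SORT(4,7);
    PIX_SORT(3,6); PIX_SORT(1,4); PIX_SORT(2,5);
    PIX_SORT(4,7); PIX_SORT(4,2); PIX_SORT(6,4);
    PIX_SORT(4,2);
    #undef PIX_SORT
    return p[4];   // after the network, the median sits in p[4]
}

With that in place, the ordering loop at the end of FilterKernel reduces to d_output_img[row*d_iCols + col] = median9(window); with no data-dependent loop. The other area from note 7, a shared-memory tile so each pixel is read from global memory once rather than up to nine times, composes with this but is left out of the sketch.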

EDIT: After reading your comments that things are still not working, it appears your OpenCV code is not set up properly to feed a single-channel 8-bit grayscale (CV_8UC1) image both to and from your take_input function. The problem arises from this line:

cv::Mat input = cv::imread("C:/Users/OCT/Documents/Visual Studio 2008/Projects/MedianFilter/MedianFilter/pic1.bmp",1);

The 1 parameter being passed to imread specifies an RGB image load. Refer to the imread documentation:

Now we call the imread function which loads the image name specified by the first argument (argv[1]). The second argument specifies the format in which we want the image. This may be:

CV_LOAD_IMAGE_UNCHANGED (<0) loads the image as is (including the alpha channel if present)
CV_LOAD_IMAGE_GRAYSCALE ( 0) loads the image as an intensity one
CV_LOAD_IMAGE_COLOR (>0) loads the image in the RGB format

You might have better luck if you specify CV_LOAD_IMAGE_GRAYSCALE there instead of 1.

Or else you should research how to load an image so that it ends up as the CV_8UC1 type.

But if you pass that input to take_input as-is, it surely won't work.
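On the OpenCV side, a minimal sketch of what the load-and-check could look like (the file name is just a placeholder, and take_input is the wrapper declared in your own CUDA_wrapper.h):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "CUDA_wrapper.h"   // declares take_input()

int main()
{
    // load directly as single-channel 8-bit grayscale (placeholder path)
    cv::Mat input = cv::imread("pic1.bmp", CV_LOAD_IMAGE_GRAYSCALE);
    if (input.empty()) { std::cout << "Image Not Found" << std::endl; return -1; }

    // verify the layout before handing input.data to take_input();
    // a 3-channel (CV_8UC3) load would be indexed incorrectly by the kernel
    if (input.type() != CV_8UC1 || !input.isContinuous())
    {
        std::cout << "Expected a continuous CV_8UC1 image" << std::endl;
        return -1;
    }

    cv::Mat output(input.rows, input.cols, CV_8UC1);
    take_input(input, output);

    cv::imshow("Input", input);
    cv::imshow("Output", output);
    cv::waitKey();
    return 0;
}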

