Something is making the window black

Hello! I am new to SDL and am using it to write a custom graphical engine. I am running SDL2 2.0.10-1 on Linux.
I’m (for now) trying to output some random noise to a window of a user-chosen size, but all I get is a black window. I did get it to render something resembling random noise once while repeatedly relaunching the program; it looked more white than noisy, as if there were only a couple of “blocks” of noise on a white background.

Run the program with ./main [width] [height]

main.c

#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <stdint.h>


int main(int argc, char *argv[])
{
	if (argc != 3){
		printf("Wrong amount of args given: expected width, height.\n");
		return 1;
	}

	int w = atoi(argv[1]);
	int h = atoi(argv[2]);

	int pixels[w][h];

	int pitch = w*4;



	if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
		fprintf(stderr, "SDL_Init Error: %s\n", SDL_GetError());
		return EXIT_FAILURE;
	}

	SDL_Window *window;
	SDL_Renderer *renderer;
	SDL_CreateWindowAndRenderer(w, h, 0, &window, &renderer);

	SDL_Texture* buffer = SDL_CreateTexture(renderer,
						  SDL_PIXELFORMAT_RGBA8888,
						  SDL_TEXTUREACCESS_STREAMING, 
						  w,h);

	SDL_LockTexture (buffer, NULL, (void **) &pixels, &pitch);

	printf("\nStarting render...\n\nWidth:  %d\nHeight: %d\n", w, h);

	#pragma omp parallel for
	for (int y = 0; y < h; ++y){
		for (int x = 0; x < w; ++x){
			pixels[x][y] = rand();
		}
	}

	printf("parallel for ends.\n");
	SDL_RenderClear(renderer); 
	SDL_RenderCopy(renderer, buffer, NULL, NULL);
	SDL_RenderPresent(renderer);
	SDL_UnlockTexture(buffer);
	

	SDL_Delay(2000);

	SDL_DestroyRenderer(renderer);
	SDL_DestroyWindow(window);
	SDL_Quit();

	return 0;
}

makefile

CC?=gcc
SDL2FLAGS=$(shell pkg-config sdl2 --cflags --libs)
CFLAGS?=-std=c11 -fopenmp -Wall -pedantic -Werror -Wshadow -Wstrict-aliasing -Wstrict-overflow

.PHONY: all msg clean fullclean

all: msg main

msg:
	@echo --- C11 ---

main: main.c
	${CC} ${CFLAGS} -O3 -o $@ $< ${SDL2FLAGS}

small: main.c
	${CC} ${CFLAGS} -Os -o main $< ${SDL2FLAGS}
	-strip main
	-sstrip main

debug: main.c
	${CC} ${CFLAGS} -O1 -g -o main $< ${SDL2FLAGS}

asm: main.asm

main.asm: main.c
	${CC} ${CFLAGS} -S -o main.asm $< ${SDL2FLAGS}

run: msg main
	time ./main

clean:
	rm -f main *.o main.asm

fullclean: clean

Two points:

  • If I’m correct, SDL_LockTexture gives you a pixels array which is a 1-D byte array of size w*h*4 (since you are using RGBA8888). So you should access it via pixels[x*4 + y*pitch] (see the little illustration after this list).
  • rand() returns a very big pseudo-random number. You want bytes, so take it modulo 255: rand() % 255.
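
In other words, with 4 bytes per pixel and pitch bytes per row, the flat index is x*4 + y*pitch. A plain-C illustration of that layout, without any SDL (the 4x3 size is just an example):

#include <stdlib.h>

int main(void)
{
	int w = 4, h = 3;
	int pitch = w * 4;                         /* bytes per row: 4 bytes per pixel */
	unsigned char *pixels = malloc(h * pitch); /* flat byte buffer for the whole image */

	for (int y = 0; y < h; ++y)
		for (int x = 0; x < w; ++x)
			pixels[x*4 + y*pitch] = rand() % 255; /* first byte of pixel (x, y) */

	free(pixels);
	return 0;
}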

Ah, I was operating on the assumption that each pixel was stored as a single int, and that each byte of that int then made up one color channel.

Well, this just throws a segfault:

int main(int argc, char *argv[])
{
	if (argc!=3){
		printf("Wrong amount of args given: expected width, height.\n");
		return 1;
	}

    srand(clock());

	int w = atoi(argv[1]);
	int h = atoi(argv[2]);

	int pixels[w*h];

	int pitch = w*4;



	if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
		fprintf(stderr, "SDL_Init Error: %s\n", SDL_GetError());
		return EXIT_FAILURE;
	}

	SDL_Window *window;
	SDL_Renderer *renderer;
	SDL_CreateWindowAndRenderer(w, h, 0, &window, &renderer);

	SDL_Texture* buffer = SDL_CreateTexture(renderer,
						  SDL_PIXELFORMAT_RGBA8888,
						  SDL_TEXTUREACCESS_STREAMING, 
						  w,h);

	SDL_LockTexture (buffer, NULL, (void **) &pixels, &pitch);

	printf("\nStarting render...\n\nWidth:  %d\nHeight: %d\n", w, h);

	//#pragma omp parallel for
	for (int x = 0; x < w; ++x){
		for (int y = 0; y < h; ++y){
			int index = x*4+y*pitch;
			pixels[index] = rand();
		}
	}

	printf("parallel for ends.\n");
	SDL_RenderClear(renderer); 
	SDL_RenderCopy(renderer, buffer, NULL, NULL);
	SDL_RenderPresent(renderer);
	SDL_UnlockTexture(buffer);
	

	SDL_Delay(2000);

	SDL_DestroyRenderer(renderer);
	SDL_DestroyWindow(window);
	SDL_Quit();

	return 0;
}

Shouldn’t you rather define the pixels as uint8_t?
Oh, and the segfault is surely because the array should have been defined as [4*w*h].

Here’s my new main: no more errors, but still only a black screen. This should be producing random shades of red, if I’m correct.

int main(int argc, char *argv[])
{
	if (argc!=3){
		printf("Wrong amount of args given: expected width, height.\n");
		return 1;
	}

	srand(clock());

	int w = atoi(argv[1]);
	int h = atoi(argv[2]);

	int pitch = w*3;

	uint8_t pixels[h*pitch];


	if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
		fprintf(stderr, "SDL_Init Error: %s\n", SDL_GetError());
		return EXIT_FAILURE;
	}

	SDL_Window *window;
	SDL_Renderer *renderer;
	SDL_CreateWindowAndRenderer(w, h, 0, &window, &renderer);

	SDL_Texture* buffer = SDL_CreateTexture(renderer,
						  SDL_PIXELFORMAT_RGB888,
						  SDL_TEXTUREACCESS_STREAMING,
						  w,h);

	printf("\nStarting render...\n\nWidth:  %d\nHeight: %d\n", w, h);

	#pragma omp parallel for
	for (int x = 0; x < w; ++x){
		for (int y = 0; y < h; ++y){
			int index = x*3+y*pitch;
			pixels[index] = (uint8_t)rand()%255;
		}
	}

	printf("parallel for ends.\n");
	SDL_LockTexture (buffer, NULL, (void **) &pixels, &pitch);
	SDL_RenderClear(renderer);
	SDL_RenderCopy(renderer, buffer, NULL, NULL);
	SDL_RenderPresent(renderer);
	SDL_UnlockTexture(buffer);

	SDL_Delay(2000);

	SDL_DestroyRenderer(renderer);
	SDL_DestroyWindow(window);
	SDL_Quit();

	return 0;
}

OK, now I suppose that the order is not correct. It should be:

  1. define texture
  2. lock texture, this gives you the pixels
  3. modify the pixels
  4. unlock texture
  5. render copy
  6. render present

Weird, putting SDL_LockTexture before the pixel-editing loop creates a segfault.

Edit: looks like it’s the index that’s wrong, or something.

[Thread debugging using libthread_db enabled]
Using host libthread_db library "/usr/lib/libthread_db.so.1".
Core was generated by `./main 1920 1080'.
Program terminated with signal SIGSEGV, Segmentation fault.
#0  0x000055c768f962d5 in main (argc=<optimized out>, argv=<optimized out>)
    at main.c:58
58				pixels[index] = (uint8_t)rand()%255;
[Current thread is 1 (Thread 0x7fefb2de17c0 (LWP 5008))]
(gdb)

Does SDL_CreateTexture() replace the array with some different data structure? I have that creeping suspicion. I tried to look up some documentation but really didn’t find anything, not even a code example on this stuff.

There was a code example for CreateTexture, but I don’t get it. Should I not be editing pixel arrays and feeding them into SDL?

you’re a bit too fast… try this:

int pitch;
Uint8 *pixels;
  • lock
  • modify with

pixels[x*4 + pitch*y] = rand() % 255;
pixels[x*4+1 + pitch*y] = rand() % 255;
pixels[x*4+2 + pitch*y] = rand() % 255;

  • unlock
  • copy
  • present

it should work
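
Put together, it would look roughly like this (an untested sketch; buffer, renderer, w and h as in your code):

	int pitch;
	Uint8 *pixels;

	SDL_LockTexture(buffer, NULL, (void **) &pixels, &pitch);  /* SDL fills in pixels and pitch */
	for (int y = 0; y < h; ++y){
		for (int x = 0; x < w; ++x){
			pixels[x*4   + pitch*y] = rand() % 255;
			pixels[x*4+1 + pitch*y] = rand() % 255;
			pixels[x*4+2 + pitch*y] = rand() % 255;
		}
	}
	SDL_UnlockTexture(buffer);

	SDL_RenderClear(renderer);
	SDL_RenderCopy(renderer, buffer, NULL, NULL);
	SDL_RenderPresent(renderer);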

Thanks, this works!
I have some questions:

  • Why do you step x by 4, even though there are only 3 bytes per pixel? I tried 3, but that ended up rendering a black bar to the right; with your method it rendered correctly.
  • Why a pointer instead of an array? Is there any chance of segfaults with that? (More of a C question.)
  • Which header gives me Uint8? I only have uint8_t, from <stdint.h>.

Working code for those wandering the same path in the future:

#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <stdint.h>
#include <time.h>

int main(int argc, char *argv[])
{
	if (argc!=3){
		printf("Wrong amount of args given: expected width, height.\n");
		return 1;
	}

	srand(clock());

	int w = atoi(argv[1]);
	int h = atoi(argv[2]);

	int pitch = w*3;
	int index;
	uint8_t *pixels;


	if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
		fprintf(stderr, "SDL_Init Error: %s\n", SDL_GetError());
		return EXIT_FAILURE;
	}

	SDL_Event event;
	SDL_Window *window;
	SDL_Renderer *renderer;

	SDL_CreateWindowAndRenderer(w, h, 0, &window, &renderer);

	SDL_Texture* buffer = SDL_CreateTexture(renderer,
						  SDL_PIXELFORMAT_RGB888,
						  SDL_TEXTUREACCESS_STREAMING, 
						  w,h);

	printf("\nStarting render...\n\nWidth:  %d\nHeight: %d\n", w, h);


	while (1) {
		SDL_PollEvent(&event);
		if(event.type == SDL_QUIT)
			break;

		SDL_LockTexture (buffer, NULL, (void **) &pixels, &pitch);
		
		
		//#pragma omp parallel for
		for (int x = 0; x < w; ++x)
		{
			for (int y = 0; y < h; ++y)
			{
				index = x*4 + y*pitch;

				pixels[index] = rand() % 255;
				pixels[index+1] = rand() % 255;
				pixels[index+2] = rand() % 255;
			}
		}

		SDL_UnlockTexture(buffer);
		SDL_RenderClear(renderer); 
		SDL_RenderCopy(renderer, buffer, NULL, NULL);
		SDL_RenderPresent(renderer);
		SDL_Delay(100);
	}

	SDL_DestroyRenderer(renderer);
	SDL_DestroyWindow(window);
	SDL_Quit();

	return 0;
}

Good, but your code is not completely correct. Try this one; you will see that the pitch is not what one might expect. The reason (I suppose) is that SDL uses an Int32 to store either RGB or RGBA, for efficiency reasons. In the case of RGB, the extra byte is simply ignored.

#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <stdint.h>


int main(int argc, char *argv[])
{
	if (argc != 3){
		printf("Wrong amount of args given: expected width, height.\n");
		return 1;
	}

	int w = atoi(argv[1]);
	int h = atoi(argv[2]);

	int pitch;
	int bytesPerPixel;
	Uint8 *pixels;


	if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
		fprintf(stderr, "SDL_Init Error: %s\n", SDL_GetError());
		return EXIT_FAILURE;
	}

	SDL_Window *window;
	SDL_Renderer *renderer;
	SDL_CreateWindowAndRenderer(w, h, 0, &window, &renderer);

	SDL_Texture* buffer = SDL_CreateTexture(renderer,
						  SDL_PIXELFORMAT_RGB888,
						  SDL_TEXTUREACCESS_STREAMING,
						  w,h);

	SDL_LockTexture (buffer, NULL, (void **) &pixels, &pitch);
	bytesPerPixel = pitch/w;
	printf("\nStarting render...\n\nWidth:  %d\nHeight: %d\nBPP: %d\n", w, h, bytesPerPixel);

	#pragma omp parallel for
	for (int y = 0; y < h; ++y){
		for (int x = 0; x < w; ++x){
			pixels[x*bytesPerPixel + pitch*y] = rand() % 255;
			pixels[x*bytesPerPixel+1 + pitch*y] = rand() % 255;
			pixels[x*bytesPerPixel+2 + pitch*y] = rand() % 255;
		}
	}
	SDL_UnlockTexture(buffer);

	printf("parallel for ends.\n");
	SDL_RenderClear(renderer);
	SDL_RenderCopy(renderer, buffer, NULL, NULL);
	SDL_RenderPresent(renderer);

	SDL_Delay(2000);

	SDL_DestroyRenderer(renderer);
	SDL_DestroyWindow(window);
	SDL_Quit();

	return 0;
}

PS: after googling, I realise that my first post was not completely correct. You can actually use Uint32 *pixels;, in which case you can simply access the pixels in the loop via pixels[y * w + x]. For finding the correct byte order when encoding a color, you may use SDL_MapRGBA. A rough sketch is below.
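
Something like this (untested; here I’m assuming an RGBA8888 streaming texture called buffer instead of the RGB888 one above, and I step between rows with the pitch SDL reports):

	Uint32 *pixels32;
	int pitch;
	SDL_PixelFormat *fmt = SDL_AllocFormat(SDL_PIXELFORMAT_RGBA8888);

	SDL_LockTexture(buffer, NULL, (void **) &pixels32, &pitch);
	for (int y = 0; y < h; ++y){
		Uint32 *row = pixels32 + y * (pitch / sizeof(Uint32));   /* pitch is in bytes */
		for (int x = 0; x < w; ++x){
			/* SDL_MapRGBA packs the channels in whatever order the format expects */
			row[x] = SDL_MapRGBA(fmt, rand() % 255, rand() % 255, rand() % 255, 255);
		}
	}
	SDL_UnlockTexture(buffer);
	SDL_FreeFormat(fmt);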

@Sanette, do you know why “SDL_LockTexture()” and “SDL_UnlockTexture()” affect the “free()” function?

I’ve been trying the same plan as you showed above, with an array of pixels, but my array of pixels is placed on the heap with “malloc()”. Locking and unlocking the texture displays my images correctly, but closing the program shows an error that says “Debug Assertion Failed”. If I remove the line that says “free(pixels)”, the error WON’T appear. Why would the locking functions affect the process of freeing my array’s memory?

I’ve always been using a function called “SDL_UpdateTexture()” and that function doesn’t cause problems. The only reason I’ve wanted to switch to “SDL_LockTexture()” and “SDL_UnlockTexture()” is that the wiki page for “SDL_UpdateTexture()” suggests that the locking functions are faster. I noticed a speed boost of around 7 percent with them, but I don’t know why they ruin the process of freeing the memory.
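
For reference, the “SDL_UpdateTexture()” version that works fine for me looks roughly like this (simplified; texture, renderer, SCREEN_WIDTH and SCREEN_HEIGHT are as in my sample below):

Uint32 *pixels = (Uint32*)malloc((SCREEN_WIDTH * SCREEN_HEIGHT) * sizeof(Uint32));
int pitch = SCREEN_WIDTH * sizeof(Uint32);

/* (fill pixels here) */

SDL_UpdateTexture(texture, NULL, pixels, pitch);   /* SDL copies the data */
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);

free(pixels);   /* no problem here */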

I looked at the source code of “SDL_render.c” and searched for all instances of “SDL_LockTexture”; they seem to create an extra copy of the pixels array and fill that array instead. I copied that idea, and the program then freed the memory without an error, but transferring all of my pixels to a second array made the whole process slower than just using “SDL_UpdateTexture()”.

This is a sample of what I did:

Uint32 *pixels;

pixels = (Uint32*)malloc((SCREEN_WIDTH * SCREEN_HEIGHT) * sizeof(Uint32));

int pitch_test = SCREEN_WIDTH * sizeof(Uint32);

SDL_Texture* texture = NULL;

texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, SCREEN_WIDTH, SCREEN_HEIGHT);

SDL_LockTexture(texture, NULL, (void **)&pixels, &pitch_test);

(my pixel-filling parts are here.)

SDL_UnlockTexture(texture);

SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);

Edit 1:
I decided to place my array of pixels on the stack instead of on the heap, so I won’t need to free the memory. Now I can enjoy the 7% speed improvement, and I will assume that “SDL_LockTexture()” is not designed to handle a pixels array that’s on the heap.

Hi, I never actually use SDL with the C language, so I’m not the right person for this question. However, maybe the following is related: when you destroy a texture, it will free the associated data. So maybe you could try to destroy the texture.
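
Another guess, in case it helps: SDL_LockTexture() writes a pointer to the texture’s own pixel buffer into the variable you pass, so if you pass the address of your malloc()’d pointer, the original pointer is overwritten and free() is then called on SDL’s buffer instead of yours. Keeping the two pointers separate would look roughly like this (untested sketch, reusing the names from your sample):

Uint32 *pixels = (Uint32*)malloc((SCREEN_WIDTH * SCREEN_HEIGHT) * sizeof(Uint32));  /* your buffer */
Uint32 *locked = NULL;   /* will point into the texture; never free() this one */
int pitch = 0;

SDL_LockTexture(texture, NULL, (void **)&locked, &pitch);
/* (write into locked[...] here, or copy from pixels row by row using pitch) */
SDL_UnlockTexture(texture);

free(pixels);   /* still the pointer malloc() returned */

In fact, if you fill locked directly you may not need your own malloc()’d array at all.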