I am writing a concurrent, platform-independent client-server program. On UNIX, accept() fails with a bad file descriptor, yet the Winsock build works fine. I tested it on both Linux and Windows.
int child, len, option = 1, rd;
SOCKET client_socket;
struct sockaddr_in client_addr;

SOCKET server_socket = socket(AF_INET, SOCK_STREAM, 0);
if (server_socket == INVALID_SOCKET)
{
    fprintf(stderr, "Unable to initialize server socket");
    return -1;
}
else printf("Socket successfully created.\n");

if (setsockopt(server_socket, SOL_SOCKET, SO_REUSEADDR, &option, sizeof(int)) == SOCKET_ERROR)
{
    fprintf(stderr, "setsockopt for SO_REUSEADDR failed.\n");
}
else printf("Set SO_REUSEADDR: ON\n");

struct sockaddr_in server_addr;
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr = INADDR_ANY;
server_addr.sin_port = htons(PORT);

if (bind(server_socket, (struct sockaddr *) &server_addr, sizeof(server_addr)) == SOCKET_ERROR)
{
    fprintf(stderr, "Unable to bind to %s port %d\n", inet_ntoa(server_addr.sin_addr), PORT);
    perror("");
    close(server_socket);
    return -1;
}
else printf("Bind successful.\n");

if (listen(server_socket, SOMAXCONN) == SOCKET_ERROR)
{
    fprintf(stderr, "Unable to put server socket into listen state.\n");
    close(server_socket);
    return -1;
}
else printf("Listening on %s:%d.\n", inet_ntoa(server_addr.sin_addr), PORT);

socklen_t cli_addr_size = sizeof(client_addr);
while (true)
{
    //HERE
    client_socket = accept(server_socket, (struct sockaddr *) &client_addr, &cli_addr_size);
    if (client_socket == INVALID_SOCKET)
    {
        fprintf(stderr, "Accept failed with %d\n", client_socket);
        perror("");
        return -1;
    }
    else
    {
        printf("Client %s:%d joined the server.\n", inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port));
        child = fork();
        if (child < 0)
        {
            perror("ERROR on fork");
            exit(-1);
        }
        printf("fork() returned %d.\n", child);
        if (child == 0)
        {
            close(server_socket);
            rd = recv(client_socket, rcvbuf, sizeof(rcvbuf), 0);
            if (rd < 0)
                perror("Error in receiving message");
            else if (rd == 0)
                printf("Client %s disconnected\n", inet_ntoa(client_addr.sin_addr));
            else
            {
                sprintf(sndbuf, "Client %s:%d: %s\n", inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port),
                        rcvbuf);
                puts(sndbuf);
                if (send(client_socket, sndbuf, sizeof(sndbuf), 0) < 0)
                {
                    perror("Error in sending message");
                }
            }
        }
    }
}
I have included this whole chunk of code to prevent guesswork.
Is there also a better way to implement the fork() logic, for example by gracefully preventing orphan/zombie processes? Improvements are welcome.
Answer:
After the first connection is accepted and a child is forked, the child process continues around the while (true) loop once it has finished processing the connection. Since the child has already closed the listening socket, its next call to accept() fails with EBADF (bad file descriptor).

The child should instead call _exit() when it is done with its processing; unlike exit(), _exit() terminates immediately without flushing the stdio buffers inherited from the parent:
if (child == 0)
{
    close(server_socket);
    rd = recv(client_socket, rcvbuf, sizeof(rcvbuf), 0);
    if (rd < 0)
        perror("Error in receiving message");
    else if (rd == 0)
        printf("Client %s disconnected\n", inet_ntoa(client_addr.sin_addr));
    else
    {
        sprintf(sndbuf, "Client %s:%d: %s\n", inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port),
                rcvbuf);
        puts(sndbuf);
        if (send(client_socket, sndbuf, sizeof(sndbuf), 0) < 0)
        {
            perror("Error in sending message");
        }
    }
    _exit(0); /* terminate the child so it never re-enters the accept() loop */
}
You probably also want to have the parent periodically wait() for child processes so you don't have zombie processes sitting around.
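For example, a common POSIX approach is to install a SIGCHLD handler that reaps every exited child with waitpid() and WNOHANG. Here is a minimal sketch, assuming a POSIX build; the handler name reap_children is illustrative and not part of your original code:

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/wait.h>

/* Reap every child that has exited so none is left as a zombie. */
static void reap_children(int sig)
{
    (void)sig;
    int saved_errno = errno;               /* waitpid() may clobber errno */
    while (waitpid(-1, NULL, WNOHANG) > 0)
        ;                                  /* keep reaping until none are left */
    errno = saved_errno;
}

/* Install the handler once, before entering the accept() loop: */
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = reap_children;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART;                  /* restart accept() instead of failing with EINTR */
if (sigaction(SIGCHLD, &sa, NULL) == -1)
    perror("sigaction");

Alternatively, setting the SIGCHLD disposition to SIG_IGN (or using the SA_NOCLDWAIT flag) tells the kernel not to create zombies at all. None of this applies to the Winsock build, since Windows has no fork() and no zombie processes.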