Interview Question: Performance Optimization of a Multithreaded Network Server in C

Design a multithreaded network server in C that handles a large number of concurrent client requests. Explain how to optimize the server's performance: how to avoid the overhead of thread context switches, how to manage a thread pool efficiently, and how to use the operating system's asynchronous I/O mechanisms to improve network I/O efficiency. Provide key code snippets and the reasoning behind each optimization.

Answer

1. Avoiding the overhead of thread context switches

  • Reduce the number of threads: estimate the concurrency the server must sustain and create only as many threads as the hardware can run efficiently; too many threads cause frequent context switches. A common rule is to size the pool by the number of CPU cores: roughly one thread per core for CPU-bound work, somewhat more for I/O-bound work (a sizing sketch follows the affinity example below).
  • Use thread affinity: pin each thread to a specific CPU core so the scheduler does not migrate it, reducing cache invalidation and context-switch overhead. On Linux this is done with pthread_setaffinity_np (a GNU extension that requires _GNU_SOURCE), for example:
#define _GNU_SOURCE   // required for pthread_setaffinity_np and the CPU_* macros
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

void* thread_function(void* arg) {
    cpu_set_t cpu_set;
    CPU_ZERO(&cpu_set);
    CPU_SET(0, &cpu_set); // pin this thread to CPU core 0
    // pthread_setaffinity_np returns an error number; it does not set errno
    int rc = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_set);
    if (rc != 0) {
        fprintf(stderr, "pthread_setaffinity_np: %s\n", strerror(rc));
    }
    // thread work goes here
    return NULL;
}
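
As a rough sketch of the sizing rule above (assuming POSIX sysconf on Linux; default_pool_size is an illustrative helper, not a standard API):

#include <unistd.h>

// Derive a default pool size from the number of online CPU cores.
int default_pool_size(void) {
    long cores = sysconf(_SC_NPROCESSORS_ONLN);
    if (cores < 1) {
        cores = 1; // conservative fallback if the query fails
    }
    // One thread per core suits CPU-bound work; I/O-bound workloads
    // may justify somewhat more, since threads spend time blocked.
    return (int)cores;
}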

2. Managing the thread pool efficiently

  • Task queue: store pending client requests in a task queue from which pool threads take tasks and execute them. The queue can be backed by a linked list or an array. For example, a linked-list implementation:
#include <pthread.h>
#include <stdlib.h>

typedef struct Task {
    void (*func)(void*);   // work to run
    void* arg;             // argument passed to func
    struct Task* next;
} Task;

typedef struct ThreadPool {
    Task* head;            // front of the task queue
    Task* tail;            // back of the task queue
    pthread_mutex_t mutex; // protects the queue and the stop flag
    pthread_cond_t cond;   // signaled when work arrives or on shutdown
    int stop;              // set to 1 to shut the pool down
    pthread_t* threads;
    int num_threads;
} ThreadPool;
  • Thread creation and destruction: create a fixed set of threads when the pool is initialized and keep them alive for the server's lifetime, avoiding the cost of repeatedly creating and destroying threads. On shutdown, wait for all queued tasks to finish, then join the threads.
void* worker(void* arg) {
    ThreadPool* pool = (ThreadPool*)arg;
    while (1) {
        pthread_mutex_lock(&pool->mutex);
        // Sleep until a task arrives or shutdown is requested
        while (pool->head == NULL && !pool->stop) {
            pthread_cond_wait(&pool->cond, &pool->mutex);
        }
        // Exit only after the queue has been drained
        if (pool->stop && pool->head == NULL) {
            pthread_mutex_unlock(&pool->mutex);
            pthread_exit(NULL);
        }
        Task* task = pool->head;
        pool->head = task->next;
        if (pool->head == NULL) {
            pool->tail = NULL;
        }
        pthread_mutex_unlock(&pool->mutex);
        (*task->func)(task->arg);  // run the task outside the lock
        free(task);
    }
    return NULL;
}

ThreadPool* create_thread_pool(int num_threads) {
    ThreadPool* pool = (ThreadPool*)malloc(sizeof(ThreadPool));
    pool->head = NULL;
    pool->tail = NULL;
    pool->stop = 0;
    pool->num_threads = num_threads;
    pool->threads = (pthread_t*)malloc(num_threads * sizeof(pthread_t));
    pthread_mutex_init(&pool->mutex, NULL);
    pthread_cond_init(&pool->cond, NULL);
    for (int i = 0; i < num_threads; ++i) {
        pthread_create(&pool->threads[i], NULL, worker, pool);
    }
    return pool;
}

void destroy_thread_pool(ThreadPool* pool) {
    pthread_mutex_lock(&pool->mutex);
    pool->stop = 1;
    pthread_cond_broadcast(&pool->cond);
    pthread_mutex_unlock(&pool->mutex);
    for (int i = 0; i < pool->num_threads; ++i) {
        pthread_join(pool->threads[i], NULL);
    }
    pthread_mutex_destroy(&pool->mutex);
    pthread_cond_destroy(&pool->cond);
    free(pool->threads);
    Task* task;
    while (pool->head != NULL) {
        task = pool->head;
        pool->head = task->next;
        free(task);
    }
    free(pool);
}
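
The snippets above never show how work enters the queue. A minimal enqueue function matching these definitions might look like the following (the integrated example later in this answer uses the same pattern):

// Append a task to the queue and wake one sleeping worker.
void add_task(ThreadPool* pool, void (*func)(void*), void* arg) {
    Task* task = (Task*)malloc(sizeof(Task));
    task->func = func;
    task->arg = arg;
    task->next = NULL;
    pthread_mutex_lock(&pool->mutex);
    if (pool->tail == NULL) {
        pool->head = pool->tail = task;  // queue was empty
    } else {
        pool->tail->next = task;
        pool->tail = task;
    }
    pthread_cond_signal(&pool->cond);    // one waiter is enough for one task
    pthread_mutex_unlock(&pool->mutex);
}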

3. Using the operating system's asynchronous I/O to improve network I/O efficiency

  • Linux AIO via libaio: Linux's kernel-native asynchronous I/O is exposed through the libaio library (header <libaio.h>, link with -laio); note this is distinct from POSIX AIO's <aio.h>. An io_context is initialized first, then I/O requests are submitted and their completions harvested. Be aware that kernel AIO is only truly asynchronous for O_DIRECT disk I/O; for sockets, non-blocking I/O with epoll (or io_uring on modern kernels) is the standard high-performance mechanism. For example, reading a file asynchronously:
#include <libaio.h>   // kernel-native AIO; link with -laio
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>

#define BUFFER_SIZE 1024

void read_callback(struct io_event* events, int num_events) {
    for (int i = 0; i < num_events; ++i) {
        // res holds the byte count on success, or -errno on failure
        ssize_t res = (ssize_t)events[i].res;
        if (res > 0) {
            // The buffer pointer is recoverable from the completed iocb
            char* buf = (char*)events[i].obj->u.c.buf;
            printf("Read %zd bytes: %.*s\n", res, (int)res, buf);
        } else if (res == 0) {
            printf("End of file\n");
        } else {
            fprintf(stderr, "read error: %s\n", strerror((int)-res));
        }
    }
}

int main() {
    io_context_t io_ctx = 0;          // must be zeroed before io_queue_init
    if (io_queue_init(1, &io_ctx) != 0) {
        perror("io_queue_init");
        return 1;
    }
    int fd = open("test.txt", O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    char buffer[BUFFER_SIZE];
    struct iocb iocb;
    struct iocb* ios[1] = { &iocb };  // io_submit takes an array of pointers
    io_prep_pread(&iocb, fd, buffer, BUFFER_SIZE, 0);
    io_submit(io_ctx, 1, ios);
    struct io_event events[1];
    io_getevents(io_ctx, 1, 1, events, NULL);
    read_callback(events, 1);
    io_queue_release(io_ctx);
    close(fd);
    return 0;
}
  • Overlapped I/O on Windows: use CreateIoCompletionPort and related functions. Create an I/O completion port, associate sockets with it, and let worker threads dequeue completed operations from the port; a sketch follows below.
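
A minimal sketch of the IOCP pattern, assuming a connected SOCKET obtained elsewhere; IoCtx, post_read, and iocp_worker are illustrative names, not Windows APIs. Link with ws2_32:

#include <winsock2.h>
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SZ 1024

typedef struct {
    OVERLAPPED ov;   // must be first so the dequeued LPOVERLAPPED casts back
    WSABUF wsabuf;
    char buf[BUF_SZ];
} IoCtx;

// Worker thread: block on the completion port and handle finished reads.
DWORD WINAPI iocp_worker(LPVOID arg) {
    HANDLE iocp = (HANDLE)arg;
    DWORD bytes;
    ULONG_PTR key;
    LPOVERLAPPED pov;
    while (GetQueuedCompletionStatus(iocp, &bytes, &key, &pov, INFINITE)) {
        IoCtx* ctx = (IoCtx*)pov;
        printf("Received %lu bytes: %.*s\n", bytes, (int)bytes, ctx->buf);
        // A real server would re-post another WSARecv here.
        free(ctx);
    }
    return 0;
}

// Associate a connected socket with the port and post one overlapped read.
void post_read(HANDLE iocp, SOCKET client) {
    CreateIoCompletionPort((HANDLE)client, iocp, (ULONG_PTR)client, 0);
    IoCtx* ctx = (IoCtx*)calloc(1, sizeof(IoCtx));
    ctx->wsabuf.buf = ctx->buf;
    ctx->wsabuf.len = BUF_SZ;
    DWORD flags = 0;
    // Returns immediately; the completion is delivered to the IOCP.
    WSARecv(client, &ctx->wsabuf, 1, NULL, &flags, &ctx->ov, NULL);
}

The port itself would be created once with CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0) and shared by a small number of worker threads, typically one per core.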

Integrating the key code snippets

Below is a simple integration of the key pieces: a multithreaded network server built on the thread pool above plus asynchronous I/O (Linux AIO as the example; see the caveat in the worker comments about AIO on sockets):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <libaio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <string.h>

#define PORT 8888
#define BACKLOG 10
#define BUFFER_SIZE 1024

// Task queue node: one accepted client connection waiting to be served
typedef struct Task {
    int client_fd;
    struct Task* next;
} Task;

// Thread pool
typedef struct ThreadPool {
    Task* head;
    Task* tail;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int stop;
    pthread_t* threads;
    int num_threads;
} ThreadPool;

// Worker thread function
void* worker(void* arg) {
    ThreadPool* pool = (ThreadPool*)arg;
    while (1) {
        pthread_mutex_lock(&pool->mutex);
        while (pool->head == NULL && !pool->stop) {
            pthread_cond_wait(&pool->cond, &pool->mutex);
        }
        if (pool->stop && pool->head == NULL) {
            pthread_mutex_unlock(&pool->mutex);
            pthread_exit(NULL);
        }
        Task* task = pool->head;
        pool->head = task->next;
        if (pool->head == NULL) {
            pool->tail = NULL;
        }
        pthread_mutex_unlock(&pool->mutex);

        // Read the client's data through kernel AIO. Note: for sockets the
        // kernel completes these requests essentially synchronously, so this
        // mainly illustrates the API; epoll or io_uring is the usual choice
        // for high-performance socket I/O.
        io_context_t io_ctx = 0;
        io_queue_init(1, &io_ctx);
        char buffer[BUFFER_SIZE];
        struct iocb iocb;
        struct iocb* ios[1] = { &iocb };
        io_prep_pread(&iocb, task->client_fd, buffer, BUFFER_SIZE - 1, 0);
        io_submit(io_ctx, 1, ios);
        struct io_event events[1];
        io_getevents(io_ctx, 1, 1, events, NULL);
        ssize_t res = (ssize_t)events[0].res;
        if (res > 0) {
            buffer[res] = '\0';  // safe: at most BUFFER_SIZE - 1 bytes read
            printf("Received: %s\n", buffer);
        } else if (res == 0) {
            printf("Client closed connection\n");
        } else {
            fprintf(stderr, "read error: %s\n", strerror((int)-res));
        }
        io_queue_release(io_ctx);
        close(task->client_fd);
        free(task);
    }
    return NULL;
}

// Create the thread pool
ThreadPool* create_thread_pool(int num_threads) {
    ThreadPool* pool = (ThreadPool*)malloc(sizeof(ThreadPool));
    pool->head = NULL;
    pool->tail = NULL;
    pool->stop = 0;
    pool->num_threads = num_threads;
    pool->threads = (pthread_t*)malloc(num_threads * sizeof(pthread_t));
    pthread_mutex_init(&pool->mutex, NULL);
    pthread_cond_init(&pool->cond, NULL);
    for (int i = 0; i < num_threads; ++i) {
        pthread_create(&pool->threads[i], NULL, worker, pool);
    }
    return pool;
}

// Destroy the thread pool (workers exit once the queue is drained)
void destroy_thread_pool(ThreadPool* pool) {
    pthread_mutex_lock(&pool->mutex);
    pool->stop = 1;
    pthread_cond_broadcast(&pool->cond);
    pthread_mutex_unlock(&pool->mutex);
    for (int i = 0; i < pool->num_threads; ++i) {
        pthread_join(pool->threads[i], NULL);
    }
    pthread_mutex_destroy(&pool->mutex);
    pthread_cond_destroy(&pool->cond);
    free(pool->threads);
    Task* task;
    while (pool->head != NULL) {
        task = pool->head;
        pool->head = task->next;
        free(task);
    }
    free(pool);
}

// Enqueue a newly accepted client connection
void add_task(ThreadPool* pool, int client_fd) {
    Task* task = (Task*)malloc(sizeof(Task));
    task->client_fd = client_fd;
    task->next = NULL;
    pthread_mutex_lock(&pool->mutex);
    if (pool->tail == NULL) {
        pool->head = task;
        pool->tail = task;
    } else {
        pool->tail->next = task;
        pool->tail = task;
    }
    pthread_cond_signal(&pool->cond);
    pthread_mutex_unlock(&pool->mutex);
}

int main() {
    int server_fd, client_fd;
    struct sockaddr_in server_addr, client_addr;
    socklen_t client_addr_len = sizeof(client_addr);

    server_fd = socket(AF_INET, SOCK_STREAM, 0);
    if (server_fd < 0) {
        perror("socket");
        return 1;
    }

    memset(&server_addr, 0, sizeof(server_addr));  // zero padding bytes
    server_addr.sin_family = AF_INET;
    server_addr.sin_port = htons(PORT);
    server_addr.sin_addr.s_addr = INADDR_ANY;

    if (bind(server_fd, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0) {
        perror("bind");
        close(server_fd);
        return 1;
    }

    if (listen(server_fd, BACKLOG) < 0) {
        perror("listen");
        close(server_fd);
        return 1;
    }

    ThreadPool* pool = create_thread_pool(4); // create a pool of 4 worker threads

    while (1) {
        client_fd = accept(server_fd, (struct sockaddr*)&client_addr, &client_addr_len);
        if (client_fd < 0) {
            perror("accept");
            continue;
        }
        add_task(pool, client_fd);
    }

    destroy_thread_pool(pool); // not reached with the infinite accept loop above
    close(server_fd);
    return 0;
}
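
A sketch of how this example might be built and exercised, assuming a GNU toolchain with the libaio development package installed: compile with gcc -O2 server.c -o server -lpthread -laio, run ./server, then send a test message with printf 'hello' | nc localhost 8888.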

Performance optimization ideas

  • Caching: cache frequently accessed data to reduce disk and network I/O; for example, an in-memory store such as Redis can hold hot data.
  • Protocol tuning: pick the protocol that fits the workload (UDP can outperform TCP where occasional loss is tolerable), or tune TCP itself, e.g. window/buffer sizes and Nagle's algorithm, as in the sketch below.
  • Load balancing: with multiple servers, a load balancer can spread client requests evenly so that no single machine is overloaded.
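
As a hedged illustration of the per-socket TCP tuning mentioned above (standard Linux socket options; the buffer size chosen here is arbitrary and workload-dependent):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

// Apply common TCP tuning knobs to an accepted connection.
void tune_socket(int fd) {
    int one = 1;
    // Disable Nagle's algorithm so small request/response messages are
    // sent immediately instead of being coalesced.
    setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));

    // Enlarge kernel send/receive buffers; the effective TCP window also
    // depends on system-wide limits such as net.ipv4.tcp_rmem.
    int bufsize = 256 * 1024;
    setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof(bufsize));
    setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize));
}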