Top-K Algorithm for Massive Data (C Implementation)

Over the past two days, influenced by http://blog.csdn.net/v_JULY_v/archive/2011/05/08/6403777.aspx, I implemented this algorithm from scratch. It was quite rewarding: (1) I implemented a hash table with chained lists; (2) I implemented a heap; (3) I became familiar with file operations in C.

The Top-K problem for massive data is to find the K strings that appear most frequently in a very large file.

If the data can be read into memory in one pass, the problem can be solved as follows:

(1) First, traverse the file and insert each string into a hash table with chaining. A node of the hash chain is defined as:

typedef struct node {
    char *word;
    int count;
    struct node *next;
} node, *node_ptr;

word points to the string and count records how many times it has occurred.
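As a condensed sketch of the per-word counting step (append_word() in the full listing below does the same, plus computing the bucket index; count_word and bucket are hypothetical names used only here): walk the chain of the bucket the string hashes to, increment the count on a match, otherwise prepend a new node.

/* Condensed sketch of the counting step: find-or-insert in one chain. */
void count_word(node_ptr bucket[], int index, const char *str)
{
    node_ptr p;
    for (p = bucket[index]; p != NULL; p = p->next)
    {
        if (strcmp(p->word, str) == 0)
        {
            p->count++;                        /* seen before: just count it */
            return;
        }
    }
    p = (node_ptr)malloc(sizeof(node));        /* first occurrence: new node */
    p->word = (char *)malloc(strlen(str) + 1);
    strcpy(p->word, str);                      /* keep our own copy of the word */
    p->count = 1;
    p->next = bucket[index];                   /* prepend to the chain */
    bucket[index] = p;
}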

(2) Build a min-heap of capacity K from the first K elements, then traverse the remaining elements of the hash table; whenever an element's count is greater than the count at the heap top, replace the heap-top node with that element and re-adjust the heap. A minimal sketch of this replacement step follows.
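The sketch below reuses the node type and the heapAdjust() sift-down from the full listing; offer_candidate is a hypothetical helper name, not part of the original program.

/* Offer one hash-table element to the min-heap of the K most frequent
   words seen so far: it only displaces the heap root (the current
   minimum) when it occurs more often. */
void offer_candidate(node heap[], int k, node *candidate)
{
    if (candidate->count > heap[0].count)
    {
        heap[0] = *candidate;              /* overwrite the current minimum */
        heapAdjust(heap, 0, k - 1, 0);     /* sift the new root back down   */
    }
}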

The complete code is listed below; it was debugged successfully under Eclipse + CDT + MinGW.

Areas where the program still needs work:

(1) There is an unresolved bug: changing the value of HASHLEN crashes the program;

(2) The traversal of the hash chains is incorrect: when visiting each bucket, it should also check whether each node's next pointer is non-NULL and walk the entire chain (a possible rewrite is sketched after the full listing below);

 

# include <stdio.h>
# include <string.h>
# include <malloc.h>
# include <assert.h>
# include <stdlib.h>
# include <time.h>                /* for time(), used to seed rand() */

#define HASHLEN 101
#define WORDLEN 30
#define MAX     100000
#define DOMAIN  300
#define K       5

/* node of a hash chain */
typedef struct node
{
    char *word;                   /* the string itself     */
    int count;                    /* number of occurrences */
    struct node *next;
} node, *node_ptr;

static node_ptr head[HASHLEN];    /* bucket heads of the hash table        */
static node array[K];             /* min-heap of the K most frequent words */

/* hash function: map a string to a bucket index */
int hash_function(char *p)
{
    unsigned int value = 0;
    while (*p != '\0')
    {
        value = value * 31 + *p++;
        if (value > HASHLEN)
            value = value % HASHLEN;
    }
    return value;
}

/* insert a word into its hash chain, or increment its count if already present */
void append_word(char *str)
{
    int index = hash_function(str);
    node_ptr p = head[index];
    while (p != NULL)
    {
        if (strcmp(str, p->word) == 0)
        {
            (p->count)++;
            return;
        }
        p = p->next;
    }
    /* not found: create a new node and prepend it to the chain */
    node_ptr q = (node_ptr)malloc(sizeof(node));
    q->count = 1;
    q->word = (char *)malloc(strlen(str) + 1);   /* room for the string and its terminator */
    strcpy(q->word, str);
    q->next = head[index];
    head[index] = q;
}

/* generate MAX integers in the range 0 .. DOMAIN - 1 */
void gen_data()
{
    FILE *fp = fopen("c:\\data1.txt", "w");
    assert(fp);
    int i = 0;
    srand((int)(time(0)));
    for (i = 0; i < MAX; i++)
        fprintf(fp, "%d ", rand() % DOMAIN);
    fclose(fp);
}

/* sift-down: restore the min-heap property below index */
void heapAdjust(node array[], int beginIndex, int endIndex, int index)
{
    int length = endIndex - beginIndex + 1;
    int smallestIndex = index;
    int leftIndex  = 2 * index + 1;   /* children of index, with indices starting at 0 */
    int rightIndex = 2 * index + 2;
    if (leftIndex <= length - 1 && array[leftIndex].count <= array[smallestIndex].count)
        smallestIndex = leftIndex;
    if (rightIndex <= length - 1 && array[rightIndex].count <= array[smallestIndex].count)
        smallestIndex = rightIndex;
    if (smallestIndex != index)
    {
        node temp = array[smallestIndex];
        array[smallestIndex] = array[index];
        array[index] = temp;
        heapAdjust(array, beginIndex, endIndex, smallestIndex);
    }
}

/* build a min-heap over the first len elements */
void heapBuild(node array[], int len)
{
    int i = 0;
    for (i = len / 2 - 1; i >= 0; i--)
        heapAdjust(array, 0, len - 1, i);
}

int main()
{
    gen_data();
    char str[WORDLEN];
    int i;
    int cnt1 = 0;
    for (i = 0; i < HASHLEN; i++)
        head[i] = NULL;

    /* read every word and count it in the hash table */
    FILE *fp_passage = fopen("c:\\data1.txt", "r");
    assert(fp_passage);
    while (fscanf(fp_passage, "%s", str) != EOF)
    {
        cnt1++;
        append_word(str);
    }
    printf("the cnt1 is %d\n", cnt1);
    fclose(fp_passage);

    /* find the Top K: fill the heap with the first K bucket heads,
       then let the remaining bucket heads compete against the root */
    for (i = 0; i < HASHLEN; i++)
    {
        if (i < K - 1)
            array[i] = *head[i];
        else if (i == K - 1)
        {
            array[i] = *head[i];
            heapBuild(array, K);
        }
        else if (array[0].count < head[i]->count)
        {
            array[0] = *head[i];
            heapAdjust(array, 0, K - 1, 0);
        }
    }

    printf("the top K is as follows\n");
    for (i = 0; i < K; i++)
        printf("%s , and its count is %d\n", array[i].word, array[i].count);
    return 0;
}
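Regarding the two open issues above: one likely cause of the crash when HASHLEN changes is that the Top-K loop dereferences empty buckets (array[i] = *head[i] when head[i] is NULL), and in any case it only ever looks at the head of each chain. The following is a hedged sketch, not the original program's code, of a scan that skips empty buckets and walks every node; top_k_scan is a hypothetical helper that reuses node, heapBuild and heapAdjust from the listing.

/* Sketch of a Top-K scan over the whole table: the heap is filled with
   the first K nodes encountered, built once it is full, and every later
   node then competes against the heap root. */
void top_k_scan(node_ptr head[], int hashlen, node heap[], int k)
{
    int i, filled = 0;
    node_ptr p;
    for (i = 0; i < hashlen; i++)
    {
        for (p = head[i]; p != NULL; p = p->next)    /* whole chain, NULL-safe */
        {
            if (filled < k)
            {
                heap[filled++] = *p;
                if (filled == k)
                    heapBuild(heap, k);              /* heap ready once full */
            }
            else if (p->count > heap[0].count)
            {
                heap[0] = *p;                        /* replace current minimum */
                heapAdjust(heap, 0, k - 1, 0);
            }
        }
    }
}

Under MinGW the program can be built with something like gcc -o topk topk.c (assuming the source is saved as topk.c, a hypothetical file name); running it generates and then scans c:\data1.txt.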

    

Run output:

The 5 numbers that occur most often among the 100,000 generated numbers:

the top K is as follows
49 , and its count is 372
4 , and its count is 374
249 , and its count is 373
246 , and its count is 380
227 , and its count is 376

    
