The main TODO list is as follows (recorded here mainly for fear that the page above might also disappear at some point...):
Unpacking the source places all of its files and directories under that directory; after that, a plain make; make install is all it takes. Note that the build needs the header files of your currently running kernel, linked at /lib/modules/`uname -r`/build. A short sketch of these build steps is given below, and after that comes the rather long patch itself, namely the nfhipac-1.0.0.patch file:
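A minimal sketch of those build steps, assuming the kernel headers live under /usr/src and the unpacked source directory is named nfhipac (both names are illustrative, not taken from the original page):

    # point /lib/modules/`uname -r`/build at the headers of the running kernel
    # (only needed if the symlink does not exist yet; adjust the source path
    #  to wherever your kernel headers are actually installed)
    ln -s /usr/src/linux-headers-`uname -r` /lib/modules/`uname -r`/build

    # build and install from the unpacked source directory
    cd nfhipac
    make
    make install
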
diff -urN nf-hipac/INSTALL nfhipac/INSTALL
--- nf-hipac/INSTALL 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/INSTALL 2014-11-21 13:00:42.000000000 +0800
@@ -0,0 +1,3 @@
+
+
+make install
diff -urN nf-hipac/kernel/dimtree.c nfhipac/kernel/dimtree.c
--- nf-hipac/kernel/dimtree.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/dimtree.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,4308 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include "global.h"
+#include "ihash.h"
+#include "rlp.h"
+#include "dimtree.h"
+
+
+#define HAS_DT_MATCH(rule) ((rule)->dt_match_len > 0)
+#define ITH_DT_MATCH(rule, i) ((rule)->first_dt_match + (i))
+#define LAST_DT_MATCH(rule) ITH_DT_MATCH(rule, (rule)->dt_match_len - 1)
+#define LEN(array) (sizeof(array) / sizeof(*(array)))
+
+/*
+ * newspec keeps track of the rlps and elementary intervals that have been
+ * newly allocated during a series of dimtree operations;
+ * orgspec keeps track of the rlps and elementary intervals that can be
+ * freed after the series of dimtree operations has been successfully finished
+ */
+static struct ptrlist orgspec = {LIST_HEAD_INIT(orgspec.head), 0};
+static struct ihash *newspec = NULL;
+
+
+
+static inline void
+elem_free(struct dt_elem *e)
+{
+ if (unlikely(e == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ hp_free(e);
+}
+
+
+/* free s which can be an elemtary interval or a rlp */
+static inline void
+rlp_elem_free(struct gen_spec *s)
+{
+ if (unlikely(s == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ if (IS_RLP(s)) {
+ rlp_free((struct rlp_spec *) s);
+ } else {
+ /* s must be elemtary interval */
+ assert(IS_ELEM(s));
+ elem_free((struct dt_elem *) s);
+ }
+}
+
+/* set newspec bit of s which can be an elementary interval or a rlp to 0 */
+static inline void
+rlp_elem_newspec_set(struct gen_spec *s, int newspec_set)
+{
+ if (unlikely(s == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ if (IS_RLP(s)) {
+ ((struct rlp_spec *) s)->newspec = !!newspec_set;
+ } else {
+ /* s must be elemtary interval */
+ assert(IS_ELEM(s));
+ ((struct dt_elem_spec *) s)->newspec = !!newspec_set;
+ }
+}
+
+/* call rlp_elem_free for each member of orgspec and empty orgspec */
+static inline void
+orgspec_dofree(void)
+{
+ struct list_head *lh;
+ struct ptrlist_entry* e;
+
+ for (lh = orgspec.head.next; lh != &orgspec.head;) {
+ e = list_entry(lh, struct ptrlist_entry, head);
+ lh = lh->next;
+ assert((IS_RLP(e->p) &&
+ !((struct rlp_spec *) e->p)->newspec) ||
+ (IS_ELEM(e->p) &&
+ !((struct dt_elem_spec *) e->p)->newspec));
+ rlp_elem_free(e->p);
+ mini_free(e);
+ }
+ INIT_LIST_HEAD(&orgspec.head);
+ orgspec.len = 0;
+}
+
+/* call rlp_elem_free for each member of newspec and empty newspec */
+static inline void
+newspec_dofree(void)
+{
+ if (unlikely(newspec == NULL)) {
+ return;
+ }
+ IHASH_KEY_ITERATE(newspec, struct gen_spec *, rlp_elem_free);
+ ihash_free(newspec);
+ newspec = NULL;
+}
+
+/* add s to orgspec;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+orgspec_add(struct gen_spec *s)
+{
+ if (unlikely(s == NULL)) {
+ ARG_ERR;
+ }
+ assert((IS_RLP(s) && !((struct rlp_spec *) s)->newspec) ||
+ (IS_ELEM(s) && !((struct dt_elem_spec *) s)->newspec));
+#ifdef DEBUG
+ return ptrlist_add(&orgspec, s, 1);
+#else
+ return ptrlist_add(&orgspec, s, 0);
+#endif
+}
+
+/* empty orgspec */
+static inline void
+orgspec_flush(void)
+{
+ ptrlist_flush(&orgspec);
+}
+
+/* empty newspec; if newspec_reset is not 0 the newspec bit is set
+ to 0 for each element of newspec */
+static inline void
+newspec_flush(int newspec_reset)
+{
+ if (unlikely(newspec == NULL)) {
+ return;
+ }
+ if (newspec_reset) {
+ IHASH_KEY_ITERATE(newspec, struct gen_spec *,
+ rlp_elem_newspec_set, 0);
+ }
+ ihash_free(newspec);
+ newspec = NULL;
+}
+
+
+/*
+ * history operations
+ */
+
+static void
+history_undo(void)
+{
+ newspec_dofree();
+ orgspec_flush();
+}
+
+static void
+history_commit(int newspec_set)
+{
+ orgspec_dofree();
+ newspec_flush(newspec_set);
+}
+
+#ifdef DEBUG
+/* return 1 if orgspec and newspec are empty and 0 otherwise */
+static int
+history_is_empty(void)
+{
+ return newspec == NULL && list_empty(&orgspec.head);
+}
+#endif
+
+/* s is a new rlp or elementary interval layer which does __not__
+ replace another */
+static hipac_error
+history_new(struct gen_spec *s, int newspec_set)
+{
+ int stat;
+
+ if (unlikely(s == NULL)) {
+ ARG_ERR;
+ }
+
+ assert((IS_RLP(s) || IS_ELEM(s)));
+ if (unlikely(newspec == NULL)) {
+ newspec = ihash_new(INITIAL_NEWSPEC_LEN, 0,
+ NEWSPEC_AVRG_ELEM_PER_BUCKET,
+ ihash_func_val, eq_val);
+ if (newspec == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ }
+ stat = ihash_insert(&newspec, s, NULL);
+ if (stat < 0) {
+ return stat;
+ }
+ if (newspec_set) {
+ rlp_elem_newspec_set(s, 1);
+ }
+ return stat;
+}
+
+static hipac_error
+history_replace(struct gen_spec *old, struct gen_spec *new, int newspec_set)
+{
+ int stat;
+
+ if (unlikely(old == NULL || new == NULL)) {
+ ARG_ERR;
+ }
+
+ assert((IS_RLP(old) && IS_RLP(new)) ||
+ (IS_ELEM(old) && IS_ELEM(new)));
+ assert(newspec_set ||
+ (IS_RLP(old) && !((struct rlp_spec *) old)->newspec) ||
+ (IS_ELEM(old) && !((struct dt_elem_spec *) old)->newspec));
+ assert(newspec_set ||
+ (IS_RLP(new) && !((struct rlp_spec *) new)->newspec) ||
+ (IS_ELEM(new) && !((struct dt_elem_spec *) new)->newspec));
+ if (unlikely(newspec == NULL)) {
+ if (newspec_set &&
+ ((IS_RLP(old) &&
+ ((struct rlp_spec *) old)->newspec) ||
+ (IS_ELEM(old) &&
+ ((struct dt_elem_spec *) old)->newspec))) {
+ IMPOSSIBLE_CONDITION("old must be contained in new"
+ "spec but newspec is empty");
+ }
+ newspec = ihash_new(INITIAL_NEWSPEC_LEN, 0,
+ NEWSPEC_AVRG_ELEM_PER_BUCKET,
+ ihash_func_val, eq_val);
+ if (newspec == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ }
+ if (newspec_set &&
+ ((IS_RLP(old) && ((struct rlp_spec *) old)->newspec) ||
+ (IS_ELEM(old) && ((struct dt_elem_spec *) old)->newspec))) {
+
+ stat = ihash_replace(&newspec, old, NULL, new, NULL);
+ if (stat == HE_OK) {
+ rlp_elem_newspec_set(new, 1);
+ rlp_elem_free(old);
+ }
+ } else {
+ stat = orgspec_add(old);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = ihash_insert(&newspec, new, NULL);
+ if (stat < 0) {
+ return stat;
+ }
+ if (newspec_set) {
+ rlp_elem_newspec_set(new, 1);
+ }
+ }
+ return stat;
+}
+
+/* s is an obsolete rlp or elementary interval layer */
+static hipac_error
+history_obsolete(struct gen_spec *s, int newspec_set)
+{
+ if (unlikely(s == NULL)) {
+ ARG_ERR;
+ }
+
+ assert((IS_RLP(s) || IS_ELEM(s)));
+ assert(newspec_set ||
+ (IS_RLP(s) && !((struct rlp_spec *) s)->newspec) ||
+ (IS_ELEM(s) && !((struct dt_elem_spec *) s)->newspec));
+ if (unlikely(newspec == NULL && newspec_set &&
+ ((IS_RLP(s) && ((struct rlp_spec *) s)->newspec) ||
+ (IS_ELEM(s) && ((struct dt_elem_spec *) s)->newspec)))) {
+ IMPOSSIBLE_CONDITION("s is obsolete, newspec_set is not 0 and"
+ " the newspec bit of s is set __but__ s "
+ "is not contained in newspec");
+ }
+ if (newspec_set &&
+ ((IS_RLP(s) && ((struct rlp_spec *) s)->newspec) ||
+ (IS_ELEM(s) && ((struct dt_elem_spec *) s)->newspec))) {
+ if (ihash_delete(newspec, s, NULL) < 0) {
+ IMPOSSIBLE_CONDITION("unable to remove s from "
+ "newspec");
+ }
+ rlp_elem_free(s);
+ return HE_OK;
+ }
+ return orgspec_add(s);
+}
+
+/* hp_realloc can result in a pointer becoming invalid; this function is used
+ to apply this fact to the history */
+static void
+history_del_invalid(struct gen_spec *s)
+{
+ if (unlikely(s == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ if (ihash_delete(newspec, s, NULL) < 0) {
+ ERR("unable to remove invalid pointer from newspec");
+ }
+}
+
+
+
+/*
+ * termrule operations
+ */
+
+/* insert 'rule' in 'term' in sorted order (sorted after pointer addresses);
+ 'term' must be sorted before */
+static inline hipac_error
+termrule_insert(struct ptrblock **term, struct dt_rule *rule)
+{
+ __u32 i;
+
+ if (unlikely(term == NULL || rule == NULL)) {
+ ARG_ERR;
+ }
+
+ if (*term == NULL) {
+ *term = ptrblock_new(rule, 1);
+ if (*term == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ return HE_OK;
+ }
+
+#ifdef BIT32_ARCH
+ for (i = 0; i < (*term)->len &&
+ (__u32) (*term)->p[i] < (__u32) rule; i++);
+#else
+ for (i = 0; i < (*term)->len &&
+ (__u64) (*term)->p[i] < (__u64) rule; i++);
+#endif
+ if (i < (*term)->len && (*term)->p[i] == rule) {
+ IMPOSSIBLE_CONDITION("rule is already contained in term");
+ }
+ return ptrblock_insert(term, rule, i);
+}
+
+/* delete 'rule' from 'term' which must be in sorted order (sorted after
+ pointer addresses) */
+static inline hipac_error
+termrule_delete(struct ptrblock **term, const struct dt_rule *rule)
+{
+ __u32 i;
+
+ if (unlikely(term == NULL || rule == NULL)) {
+ ARG_ERR;
+ }
+ if (*term == NULL) {
+ /* rule is not in term */
+ return HE_OK;
+ }
+
+#ifdef BIT32_ARCH
+ for (i = 0; i < (*term)->len &&
+ (__u32) (*term)->p[i] < (__u32) rule; i++);
+#else
+ for (i = 0; i < (*term)->len &&
+ (__u64) (*term)->p[i] < (__u64) rule; i++);
+#endif
+
+ if (i >= (*term)->len || (*term)->p[i] != rule) {
+ /* rule is not in term */
+ return HE_OK;
+ }
+ return ptrblock_delete_pos(term, i);
+}
+
+/* delete those rules from 'term' whose match boundaries in dimension 'dimid'
+ lie completely within ['left', 'right'] */
+static inline hipac_error
+termrule_delete_ovl(struct ptrblock **term, __u32 left, __u32 right,
+ __u8 dimid)
+{
+ __u32 i, curleft, curight;
+ struct dt_match *match;
+ int stat;
+
+ if (unlikely(term == NULL || left > right ||
+ left > MAXKEY(dim2btype[dimid]) ||
+ right > MAXKEY(dim2btype[dimid]))) {
+ ARG_ERR;
+ }
+ if (*term == NULL) {
+ return HE_OK;
+ }
+
+ for (i = 0; i < (*term)->len;) {
+ match = HAS_DT_MATCH((struct dt_rule *) (*term)->p[i]) ?
+ LAST_DT_MATCH((struct dt_rule *) (*term)->p[i]) : NULL;
+ if (match != NULL && match->dimid == dimid) {
+ assert(match->left > 0 ||
+ match->right < MAXKEY(dim2btype[dimid]));
+ curleft = match->left;
+ curight = match->right;
+ } else {
+ curleft = 0;
+ curight = MAXKEY(dim2btype[dimid]);
+ }
+ if (curleft >= left && curight <= right) {
+ stat = ptrblock_delete_pos(term, i);
+ if (stat < 0) {
+ return stat;
+ }
+ if (*term == NULL) {
+ return HE_OK;
+ }
+ } else {
+ i++;
+ }
+ }
+ return HE_OK;
+}
+
+/* returns 1 if there is a rule in 'term' whose last match m produces the
+ interval represented by 'right' and dimid(m) == 'dimid' */
+static inline int
+termrule_exists(const struct ptrblock *term, __u8 dimid, __u32 right)
+{
+ struct dt_match *match;
+ struct dt_rule **rule;
+ __u32 i;
+
+ if (unlikely(right > MAXKEY(dim2btype[dimid]))) {
+ ARG_MSG;
+ return 0;
+ }
+ if (term == NULL) {
+ return 0;
+ }
+
+ rule = (struct dt_rule **) term->p;
+ for (i = 0; i < term->len; i++) {
+ match = HAS_DT_MATCH(*rule) ? LAST_DT_MATCH(*rule) : NULL;
+ if (match != NULL && match->dimid == dimid &&
+ (match->right == right ||
+ (match->left > 0 && match->left - 1 == right))) {
+ return 1;
+ }
+ rule++;
+ }
+ return 0;
+}
+
+/* return 1 if 'rule' terminates in the elementary interval described by
+ 'right' resp. 'wildcard' and 'dimid'; otherwise 0 is returned */
+static inline int
+rule_term(const struct dt_rule *rule, __u32 right, __u8 wildcard, __u8 dimid)
+{
+ __u32 lbound, ubound;
+ const struct dt_match *match;
+ __u8 match_wc, match_nwc1, match_nwc2;
+
+ if (unlikely(rule == NULL || (wildcard && !HAS_WILDCARD_DIM(dimid)))) {
+ ARG_MSG;
+ return 0;
+ }
+
+ match = HAS_DT_MATCH(rule) ? LAST_DT_MATCH(rule) : NULL;
+ if (match != NULL && match->dimid == dimid) {
+ assert(match->left > 0 ||
+ match->right < MAXKEY(dim2btype[dimid]));
+ lbound = match->left;
+ ubound = match->right;
+ } else if (match == NULL || match->dimid < dimid) {
+ lbound = 0;
+ ubound = MAXKEY(dim2btype[dimid]);
+ } else {
+ return 0;
+ }
+
+ match_wc = wildcard && (match == NULL || match->dimid < dimid);
+
+ match_nwc1 = !wildcard && HAS_WILDCARD_DIM(dimid) &&
+ match != NULL && match->dimid == dimid && ubound >= right &&
+ lbound <= right;
+
+ match_nwc2 = !wildcard && !HAS_WILDCARD_DIM(dimid) &&
+ ubound >= right && lbound <= right;
+
+ return match_wc || match_nwc1 || match_nwc2;
+}
+
+/* store the subset of rules from 'term' that terminate in the elemtary
+ interval represented by 'right' resp. 'wildcard' in dimension 'dimid'
+ in 'subterm' */
+static inline hipac_error
+termrule_subset(const struct ptrblock *term, struct ptrblock **subterm,
+ __u32 right, __u8 wildcard, __u8 dimid)
+{
+ struct dt_rule **rule;
+ int stat;
+ __u32 i;
+
+ if (unlikely(subterm == NULL)) {
+ ARG_ERR;
+ }
+
+ *subterm = NULL;
+ if (term == NULL) {
+ return HE_OK;
+ }
+
+ rule = (struct dt_rule **) term->p;
+ for (i = 0; i < term->len; i++, rule++) {
+ if (rule_term(*rule, right, wildcard, dimid)) {
+ stat = ptrblock_insert(
+ subterm, *rule, *subterm == NULL ? 0 :
+ (*subterm)->len);
+ if (stat < 0) {
+ if (*subterm != NULL) {
+ ptrblock_free(*subterm);
+ }
+ *subterm = NULL;
+ return stat;
+ }
+ }
+ }
+ return HE_OK;
+}
+
+/* merge 'tmpterm' into 'term' so that there are no duplicates;
+ 'tmpterm' is freed even if termrule_merge fails */
+static inline hipac_error
+termrule_merge(struct ptrblock **term, struct ptrlist *tmpterm)
+{
+ struct ptrlist_entry *e;
+ struct list_head *lh;
+ int stat;
+ __u32 i;
+
+ if (unlikely(term == NULL || tmpterm == NULL)) {
+ ARG_ERR;
+ }
+
+ if (ptrlist_is_empty(tmpterm)) {
+ ptrlist_free(tmpterm);
+ return HE_OK;
+ }
+
+ for (lh = tmpterm->head.next, i = 0; lh != &tmpterm->head;) {
+ e = list_entry(lh, struct ptrlist_entry, head);
+#ifdef BIT32_ARCH
+ for (; *term != NULL && i < (*term)->len &&
+ (__u32) (*term)->p[i] < (__u32) e->p; i++);
+#else
+ for (; *term != NULL && i < (*term)->len &&
+ (__u64) (*term)->p[i] < (__u64) e->p; i++);
+#endif
+ if (*term == NULL || i == (*term)->len) {
+ /* append rest of tmpterm to term */
+ do {
+ stat = ptrblock_insert(
+ term, e->p, *term == NULL ? 0 :
+ (*term)->len);
+ if (stat < 0) {
+ goto error;
+ }
+ lh = lh->next;
+ ptrlist_free_entry(e);
+ e = list_entry(lh, struct ptrlist_entry, head);
+ } while (lh != &tmpterm->head);
+ break;
+ }
+ if (e->p != (*term)->p[i]) {
+ stat = ptrblock_insert(term, e->p, i++);
+ if (stat < 0) {
+ goto error;
+ }
+ }
+ lh = lh->next;
+ ptrlist_free_entry(e);
+ }
+ ptrlist_free(tmpterm);
+ return HE_OK;
+
+ error:
+ ptrlist_free(tmpterm);
+ return stat;
+}
+
+/* remove all elements of 'delterm' from 'term'; 'delterm' must be completely
+ contained in 'term' */
+static inline hipac_error
+termrule_cut(struct ptrblock **term, struct ptrblock *delterm)
+{
+ __u32 i, j;
+ int stat;
+
+ if (unlikely(term == NULL)) {
+ ARG_ERR;
+ }
+
+ if (delterm == NULL) {
+ return HE_OK;
+ }
+ if (unlikely(*term == NULL)) {
+ IMPOSSIBLE_CONDITION("unable to cut elements from empty "
+ "termrule block");
+ }
+
+ for (i = 0, j = 0; *term != NULL && i < (*term)->len &&
+ j < delterm->len; j++) {
+#ifdef BIT32_ARCH
+ for (; i < (*term)->len &&
+ (__u32) (*term)->p[i] < (__u32) delterm->p[j];
+ i++);
+#else
+ for (; i < (*term)->len &&
+ (__u64) (*term)->p[i] < (__u64) delterm->p[j];
+ i++);
+#endif
+ if (i >= (*term)->len || (*term)->p[i] != delterm->p[j]) {
+ goto error;
+ }
+ stat = ptrblock_delete_pos(term, i);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ if (j >= delterm->len) {
+ return HE_OK;
+ }
+
+ error:
+ IMPOSSIBLE_CONDITION("delterm contains elements which are not "
+ "contained in term");
+}
+
+/* return the terminal rule (terminal target + no function based matches)
+ from 'term' which dominates the elementary interval represented by 'right'
+ resp. 'wildcard' in the dimension specified by 'dimid' and which does not
+ equal 'rule' */
+static inline struct dt_rule *
+termrule_find_best_term(const struct ptrblock *term,
+ const struct dt_rule *rule,
+ __u32 right, __u8 wildcard, __u8 dimid)
+{
+ struct dt_rule *best = NULL;
+ __u32 nextpos = (__u32) ULONG_MAX;
+ struct dt_rule *tr;
+ __u32 i;
+
+ if (unlikely(term == NULL || rule == NULL ||
+ right > MAXKEY(dim2btype[dimid]) ||
+ (wildcard && !HAS_WILDCARD_DIM(dimid)))) {
+ ARG_MSG;
+ return NULL;
+ }
+
+ for (i = 0; i < term->len; i++) {
+ tr = term->p[i];
+ if (!IS_RULE_TERM(tr) || tr == rule) {
+ continue;
+ }
+ if (rule_term(tr, right, wildcard, dimid) &&
+ tr->spec.pos < nextpos) {
+ nextpos = tr->spec.pos;
+ best = tr;
+ }
+ }
+ return best;
+}
+
+/* return the number(*) of non-terminal rules (non-terminal target or function
+ based matches) in 'term' not equal to 'rule' which terminate in the
+ elementary interval represented by 'right' resp. 'wildcard' in the
+ dimension specified by 'dimid' and whose position is < term_rule->spec.pos
+ if term_rule != NULL; if there is exactly one such non-terminal rule it is
+ stored in 'ntm_rule';
+ (*) the return value ret is 0, 1 or 2; ret == 0 || ret == 1 means there are
+ exactly ret non-terminal rules; ret == 2 means there are >= 2
+ non-terminal rules */
+static inline __u32
+termrule_num_ntm(struct dt_rule **ntm_rule, const struct ptrblock *term,
+ const struct dt_rule *term_rule, const struct dt_rule *rule,
+ __u32 right, __u8 wildcard, __u8 dimid)
+{
+ __u32 num = 0;
+ struct dt_rule *tr;
+ __u32 i;
+
+ if (unlikely(ntm_rule == NULL || term == NULL || rule == NULL ||
+ right > MAXKEY(dim2btype[dimid]) ||
+ (wildcard && !HAS_WILDCARD_DIM(dimid)))) {
+ ARG_MSG;
+ return 0;
+ }
+
+ *ntm_rule = NULL;
+ for (i = 0; i < term->len; i++) {
+ tr = term->p[i];
+ if (IS_RULE_TERM(tr) || tr == rule ||
+ (term_rule != NULL &&
+ tr->spec.pos >= term_rule->spec.pos)) {
+ continue;
+ }
+ if (rule_term(tr, right, wildcard, dimid)) {
+ *ntm_rule = tr;
+ if (++num == 2) {
+ /* there are at least 2 non-terminal rules
+ => stop searching */
+ *ntm_rule = NULL;
+ return num;
+ }
+ }
+ }
+ if (num > 1) {
+ *ntm_rule = NULL;
+ }
+ return num;
+}
+
+/* store all non-terminating rules (non-terminal target or function based
+ matches) from 'term' not equal to rule in 'e' which terminate in the
+ elementary interval represented by 'right' resp. 'wildcard' in the
+ dimension specified by 'dimid' and whose position is < max_rule->spec.pos
+ if max_rule != NULL and > min_rule->spec.pos if min_rule != NULL;
+ the rules are stored in e->ntm_rules in sorted order (sorted after their
+ positions) */
+static inline hipac_error
+termrule_insert_ntm(struct dt_elem **e, const struct ptrblock *term,
+ const struct dt_rule *min_rule,
+ const struct dt_rule *max_rule,
+ const struct dt_rule *rule,
+ __u32 right, __u8 wildcard, __u8 dimid)
+{
+ struct dt_rule *tr;
+ __u32 i, j, stat;
+
+ if (unlikely(e == NULL || *e == NULL || term == NULL ||
+ right > MAXKEY(dim2btype[dimid]) ||
+ (wildcard && !HAS_WILDCARD_DIM(dimid)))) {
+ ARG_ERR;
+ }
+
+ for (i = 0; i < term->len; i++) {
+ tr = term->p[i];
+ if (IS_RULE_TERM(tr) || tr == rule ||
+ (min_rule != NULL &&
+ (tr->spec.pos <= min_rule->spec.pos)) ||
+ (max_rule != NULL &&
+ (tr->spec.pos >= max_rule->spec.pos))) {
+ continue;
+ }
+ if (rule_term(tr, right, wildcard, dimid)) {
+ for (j = 0; j < (*e)->ntm_rules.len &&
+ ((struct dt_rule *)
+ (*e)->ntm_rules.p[j])->spec.pos <
+ tr->spec.pos; j++);
+ stat = ptrblock_insert_embed((void **) e,
+ offsetof(struct dt_elem,
+ ntm_rules),
+ tr, j);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ }
+ return HE_OK;
+}
+
+
+
+/*
+ * tmp_termrule operations
+ */
+
+static inline struct ptrlist *
+tmp_termrule_new(void)
+{
+ return ptrlist_new();
+}
+
+static inline void
+tmp_termrule_free(struct ptrlist *tmpterm)
+{
+ return ptrlist_free(tmpterm);
+}
+
+/* merge 'term' into 'tmpterm' so that there are no duplicates */
+static inline hipac_error
+tmp_termrule_merge(struct ptrlist *tmpterm, struct ptrblock *term)
+{
+ struct ptrlist_entry *e;
+ struct list_head *lh;
+ int stat;
+ __u32 i;
+
+ if (unlikely(tmpterm == NULL)) {
+ ARG_ERR;
+ }
+
+ if (term == NULL) {
+ return HE_OK;
+ }
+
+ for (i = 0, lh = tmpterm->head.next; i < term->len; i++) {
+#ifdef BIT32_ARCH
+ for (; lh != &tmpterm->head &&
+ (__u32) list_entry(lh, struct ptrlist_entry,
+ head)->p <
+ (__u32) term->p[i]; lh = lh->next);
+#else
+ for (; lh != &tmpterm->head &&
+ (__u64) list_entry(lh, struct ptrlist_entry,
+ head)->p <
+ (__u64) term->p[i]; lh = lh->next);
+#endif
+ if (lh == &tmpterm->head) {
+ /* append rest of term to tmpterm */
+ for (; i < term->len; i++) {
+ stat = ptrlist_add(tmpterm, term->p[i], 0);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ break;
+ }
+ e = list_entry(lh, struct ptrlist_entry, head);
+ if (e->p != term->p[i]) {
+ e = ptrlist_new_entry(term->p[i]);
+ if (e == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ list_add_tail(&e->head, lh);
+ tmpterm->len++;
+ }
+ }
+ return HE_OK;
+}
+
+
+
+/*
+ * elementary interval operations
+ */
+
+/* create new elementary interval layer with ntm_len non-terminal rules
+ which are stored in ntm_rules sorted after their positions */
+static inline struct dt_elem *
+elem_new(struct dt_rule *term_rule, struct dt_rule *ntm_rules[], __u32 ntm_len)
+{
+ struct dt_elem *e;
+ __u32 i;
+
+ if (unlikely(ntm_len == 0 || ntm_rules == NULL || *ntm_rules == NULL ||
+ (termrule == NULL && ntm_len <= 1))) {
+ ARG_MSG;
+ return NULL;
+ }
+
+ e = hp_alloc(sizeof(*e) + ntm_len * sizeof(*e->ntm_rules.p), 1);
+ if (e == NULL) {
+ return NULL;
+ }
+ e->spec.rlp = 0;
+ e->spec.rtype = RT_ELEM;
+ e->spec.newspec = 0;
+ e->term_rule = term_rule;
+ e->ntm_rules.len = ntm_len;
+ for (i = 0; i < ntm_len; i++) {
+ e->ntm_rules.p[i] = ntm_rules[i];
+ }
+ return e;
+}
+
+/* create new elementary interval layer with 0 non-terminal rules; notice that
+ the resulting elementary interval is not valid because it __must__ contain
+ at least one non-terminal rule */
+static inline struct dt_elem *
+elem_new_empty(struct dt_rule *term_rule)
+{
+ struct dt_elem *e;
+
+ e = hp_alloc(sizeof(*e), 1);
+ if (e == NULL) {
+ return NULL;
+ }
+ e->spec.rlp = 0;
+ e->spec.rtype = RT_ELEM;
+ e->spec.newspec = 0;
+ e->term_rule = term_rule;
+ e->ntm_rules.len = 0;
+ return e;
+}
+
+static inline int
+elem_eq(const struct dt_elem *e1, const struct dt_elem *e2)
+{
+ if (e1 == NULL || e2 == NULL || !IS_ELEM(e1) || !IS_ELEM(e2)) {
+ ARG_MSG;
+ return 0;
+ }
+ if (e1->term_rule != e2->term_rule ||
+ !ptrblock_eq(&e1->ntm_rules, &e2->ntm_rules)) {
+ return 0;
+ }
+ return 1;
+}
+
+static inline hipac_error
+elem_clone(struct dt_elem *e, struct dt_elem **clone)
+{
+ if (e == NULL || clone == NULL) {
+ ARG_ERR;
+ }
+
+ *clone = hp_alloc(sizeof(*e) + e->ntm_rules.len *
+ sizeof(*e->ntm_rules.p), 1);
+ if (*clone == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(*clone, e, sizeof(*e) + e->ntm_rules.len *
+ sizeof(*e->ntm_rules.p));
+ return HE_OK;
+}
+
+/* forward declaration */
+static int
+rlp_eq_rec(const struct rlp_spec *spec1, const struct rlp_spec *spec2);
+
+/* return 1 if g1 and g2 are equal and rules;
+ return 2 if g1 and g2 are equal and elementary intervals;
+ return 3 if g1 and g2 are equal and rlps;
+ return 0 otherwise */
+static inline int
+rlp_rule_elem_eq(const struct gen_spec *g1, const struct gen_spec *g2)
+{
+ if (g1 == NULL || g2 == NULL ||
+ (IS_RULE(g1) && IS_RULE(g2))) {
+ return g1 == g2;
+ } else if (IS_ELEM(g1) && IS_ELEM(g2)) {
+ struct dt_elem *e1 = (struct dt_elem *) g1;
+ struct dt_elem *e2 = (struct dt_elem *) g2;
+
+ if (e1->ntm_rules.len != e2->ntm_rules.len) {
+ return 0;
+ }
+ return elem_eq(e1, e2) ? 2 : 0;
+ } else if (IS_RLP(g1) && IS_RLP(g2)) {
+ struct rlp_spec *b1 = (struct rlp_spec *) g1;
+ struct rlp_spec *b2 = (struct rlp_spec *) g2;
+
+ return (rlp_spec_eq(b1, b2) && rlp_eq_rec(b1, b2)) ? 3 : 0;
+ }
+ return 0;
+}
+
+/* insert rule into rule_elem which can be a rule or an elementary interval
+ layer; the result which can be a rule or an elementary interval layer
+ is directly written to rule_elem */
+static inline hipac_error
+rule_elem_insert(struct dt_rule_elem_spec **rule_elem, struct dt_rule *rule,
+ int newspec_set)
+{
+ int stat;
+
+ if (unlikely(rule_elem == NULL || rule == NULL)) {
+ ARG_ERR;
+ }
+
+ if (*rule_elem == NULL) {
+ *rule_elem = (struct dt_rule_elem_spec *) rule;
+ return HE_OK;
+ }
+
+ assert(IS_RULE(*rule_elem) || IS_ELEM(*rule_elem));
+ assert(!IS_ELEM(*rule_elem) ||
+ ((struct dt_elem *) *rule_elem)->ntm_rules.len > 0);
+ assert(!IS_ELEM(*rule_elem) ||
+ ((struct dt_elem *) *rule_elem)->term_rule != NULL ||
+ ((struct dt_elem *) *rule_elem)->ntm_rules.len > 1);
+
+ if (IS_RULE(*rule_elem)) {
+ struct dt_rule *r = (struct dt_rule *) *rule_elem;
+
+ if (IS_RULE_TERM(rule) && IS_RULE_TERM(r)) {
+ if (rule->spec.pos < r->spec.pos) {
+ *rule_elem = (struct dt_rule_elem_spec *) rule;
+ }
+ return HE_OK;
+
+ } else if (!IS_RULE_TERM(rule) && !IS_RULE_TERM(r)) {
+ struct dt_rule *ntm[2];
+ struct dt_elem *e;
+ if (r->spec.pos < rule->spec.pos) {
+ ntm[0] = r;
+ ntm[1] = rule;
+ } else {
+ ntm[0] = rule;
+ ntm[1] = r;
+ }
+ e = elem_new(NULL, ntm, sizeof(ntm) / sizeof(*ntm));
+ if (e == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ stat = history_new((struct gen_spec *) e, newspec_set);
+ if (stat < 0) {
+ elem_free(e);
+ return stat;
+ }
+ *rule_elem = (struct dt_rule_elem_spec *) e;
+ return HE_OK;
+
+ } else {
+ struct dt_rule *term_rule, *ntm_rule;
+ struct dt_elem *e;
+ if (IS_RULE_TERM(rule)) {
+ term_rule = rule;
+ ntm_rule = r;
+ } else {
+ term_rule = r;
+ ntm_rule = rule;
+ }
+ if (term_rule->spec.pos < ntm_rule->spec.pos) {
+ *rule_elem = (struct dt_rule_elem_spec *)
+ term_rule;
+ return HE_OK;
+ }
+ e = elem_new(term_rule, &ntm_rule, 1);
+ if (e == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ stat = history_new((struct gen_spec *) e, newspec_set);
+ if (stat < 0) {
+ elem_free(e);
+ return stat;
+ }
+ *rule_elem = (struct dt_rule_elem_spec *) e;
+ return HE_OK;
+ }
+ } else {
+ /* IS_ELEM(*rule_elem) */
+ struct dt_elem *e = (struct dt_elem *) *rule_elem;
+ __u32 i;
+
+ if (e->term_rule != NULL &&
+ rule->spec.pos > e->term_rule->spec.pos) {
+ /* rule is never matched */
+ return HE_OK;
+ }
+ if (IS_RULE_TERM(rule)) {
+ /* find still matching rules if any */
+ if (((struct dt_rule *) e->ntm_rules.p[0])->spec.pos >
+ rule->spec.pos) {
+ stat = history_obsolete((struct gen_spec *) e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *rule_elem = (struct dt_rule_elem_spec *) rule;
+ return HE_OK;
+ }
+ e->term_rule = rule;
+ i = e->ntm_rules.len;
+ do {
+ i--;
+ if (((struct dt_rule *)
+ e->ntm_rules.p[i])->spec.pos <
+ rule->spec.pos) {
+ break;
+ }
+ } while (i > 0);
+ assert(((struct dt_rule *)
+ e->ntm_rules.p[i])->spec.pos < rule->spec.pos);
+ if (i < e->ntm_rules.len - 1) {
+ struct dt_elem *e2;
+ e2 = hp_realloc(e, sizeof(*e) + (i + 1) *
+ sizeof(*e->ntm_rules.p));
+ if (e2 == NULL) {
+ /* this should never happen as we
+ shrink e */
+ return HE_LOW_MEMORY;
+ }
+ if (e != e2) {
+ history_del_invalid(
+ (struct gen_spec *) e);
+ stat = history_new(
+ (struct gen_spec *) e2,
+ newspec_set);
+ if (stat < 0) {
+ elem_free(e2);
+ return stat;
+ }
+ }
+ e2->ntm_rules.len = i + 1;
+ *rule_elem = (struct dt_rule_elem_spec *) e2;
+ }
+ return HE_OK;
+
+ } else {
+ /* !IS_RULE_TERM(rule) */
+ for (i = 0; i < e->ntm_rules.len &&
+ ((struct dt_rule *)
+ e->ntm_rules.p[i])->spec.pos <
+ rule->spec.pos; i++);
+ stat = ptrblock_insert_embed((void **) rule_elem,
+ offsetof(struct dt_elem,
+ ntm_rules),
+ rule, i);
+ if (stat < 0) {
+ return stat;
+ }
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *) e);
+ stat = history_new((struct gen_spec *)
+ *rule_elem, newspec_set);
+ if (stat < 0) {
+ elem_free((struct dt_elem *)
+ *rule_elem);
+ return stat;
+ }
+ }
+ return HE_OK;
+ }
+ }
+}
+
+/* delete rule from rule_elem which can be a rule or an elementary interval
+ layer; if rule is not contained in rule_elem nothing happens;
+ the result which can be a rule or an elementary interval layer is directly
+ written to rule_elem; term, right, wildcard and dimid must be given to
+ find the next best rule(s) if necessary */
+static inline hipac_error
+rule_elem_delete(struct dt_rule_elem_spec **rule_elem,
+ const struct dt_rule *rule, const struct ptrblock *term,
+ __u32 right, __u8 wildcard, __u8 dimid, int newspec_set)
+{
+ int stat;
+
+ if (unlikely(rule_elem == NULL || rule == NULL || term == NULL ||
+ right > MAXKEY(dim2btype[dimid]) ||
+ (wildcard && !HAS_WILDCARD_DIM(dimid)))) {
+ ARG_ERR;
+ }
+
+ if (*rule_elem == NULL) {
+ /* rule is not contained in rule_elem */
+ return HE_OK;
+ }
+
+ assert(IS_RULE(*rule_elem) || IS_ELEM(*rule_elem));
+ assert(!IS_ELEM(*rule_elem) ||
+ ((struct dt_elem *) *rule_elem)->ntm_rules.len > 0);
+ assert(!IS_ELEM(*rule_elem) ||
+ ((struct dt_elem *) *rule_elem)->term_rule != NULL ||
+ ((struct dt_elem *) *rule_elem)->ntm_rules.len > 1);
+
+ if (IS_RULE(*rule_elem)) {
+ struct dt_rule *r = (struct dt_rule *) *rule_elem;
+ struct dt_rule *term_rule, *ntm_rule = NULL;
+ __u32 ntm_num;
+
+ if (r != rule) {
+ /* rule is not contained in rule_elem */
+ return HE_OK;
+ }
+
+ /* in fact it would suffice to call termrule_find_best_term
+ only if IS_RULE_TERM(r) */
+ term_rule = termrule_find_best_term(term, rule, right,
+ wildcard, dimid);
+ ntm_num = termrule_num_ntm(&ntm_rule, term, term_rule, rule,
+ right, wildcard, dimid);
+ if (term_rule == NULL && ntm_num <= 1) {
+ *rule_elem = (struct dt_rule_elem_spec *) ntm_rule;
+ return HE_OK;
+ } else if (term_rule != NULL && ntm_num == 0) {
+ *rule_elem = (struct dt_rule_elem_spec *) term_rule;
+ return HE_OK;
+ } else {
+ struct dt_elem *e = elem_new_empty(term_rule);
+ if (e == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ stat = termrule_insert_ntm(&e, term, NULL, term_rule,
+ rule, right, wildcard,
+ dimid);
+ if (stat < 0) {
+ hp_free(e);
+ return stat;
+ }
+ assert(e->ntm_rules.len > 0);
+ stat = history_new((struct gen_spec *) e, newspec_set);
+ if (stat < 0) {
+ elem_free(e);
+ return stat;
+ }
+ *rule_elem = (struct dt_rule_elem_spec *) e;
+ return HE_OK;
+ }
+ } else {
+ /* IS_ELEM(*rule_elem) */
+ struct dt_elem *e = (struct dt_elem *) *rule_elem;
+ __u32 i;
+
+ if (e->term_rule == rule) {
+ struct dt_rule *term_rule;
+ term_rule = termrule_find_best_term(
+ term, rule, right, wildcard, dimid);
+ stat = termrule_insert_ntm(
+ (struct dt_elem **) rule_elem, term,
+ e->ntm_rules.p[e->ntm_rules.len - 1],
+ term_rule, rule, right, wildcard, dimid);
+ if (stat < 0) {
+ /* we only care about rule_elem if its address
+ has changed; otherwise rule_elem is
+ handled by the history */
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *)
+ e);
+ elem_free((struct dt_elem *)
+ *rule_elem);
+ }
+ return stat;
+ }
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *) e);
+ stat = history_new((struct gen_spec *)
+ *rule_elem, newspec_set);
+ if (stat < 0) {
+ elem_free((struct dt_elem *)
+ *rule_elem);
+ return stat;
+ }
+ }
+ e = (struct dt_elem *) *rule_elem;
+ if (term_rule == NULL && e->ntm_rules.len == 1) {
+ struct dt_rule_elem_spec *ntm =
+ e->ntm_rules.p[0];
+ stat = history_obsolete((struct gen_spec *) e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *rule_elem = ntm;
+ return HE_OK;
+ }
+ e->term_rule = term_rule;
+ return HE_OK;
+ } else {
+ for (i = 0; i < e->ntm_rules.len &&
+ ((struct dt_rule *)
+ e->ntm_rules.p[i])->spec.pos <
+ rule->spec.pos; i++);
+ if (i >= e->ntm_rules.len ||
+ e->ntm_rules.p[i] != rule) {
+ /* rule is not contained in rule_elem */
+ return HE_OK;
+ }
+ if (e->ntm_rules.len == 1) {
+ struct dt_rule_elem_spec *tm =
+ (struct dt_rule_elem_spec *)
+ e->term_rule;
+ stat = history_obsolete((struct gen_spec *) e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *rule_elem = tm;
+ return HE_OK;
+ } else if (e->term_rule == NULL &&
+ e->ntm_rules.len == 2) {
+ struct dt_rule_elem_spec *ntm =
+ (struct dt_rule_elem_spec *)
+ e->ntm_rules.p[(i + 1) % 2];
+ stat = history_obsolete((struct gen_spec *) e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *rule_elem = ntm;
+ return HE_OK;
+ } else {
+ stat = ptrblock_delete_pos_embed(
+ (void **) rule_elem,
+ offsetof(struct dt_elem, ntm_rules),
+ i);
+ if (stat < 0) {
+ return stat;
+ }
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid(
+ (struct gen_spec *) e);
+ stat = history_new((struct gen_spec *)
+ *rule_elem,
+ newspec_set);
+ if (stat < 0) {
+ elem_free((struct dt_elem *)
+ *rule_elem);
+ return stat;
+ }
+ }
+ return HE_OK;
+ }
+ }
+ }
+}
+
+
+
+/*
+ * recursive rlp operations
+ */
+
+/* necessary forward declaration */
+static hipac_error
+rlp_clone_rec(const struct rlp_spec *spec, struct rlp_spec **clone,
+ int newspec_set);
+
+static inline hipac_error
+rlp_clone_help(struct gen_spec **g, int newspec_set)
+{
+ int stat = HE_OK;
+
+ if (*g == NULL) {
+ return HE_OK;
+ }
+ if (IS_RLP(*g)) {
+ stat = rlp_clone_rec((struct rlp_spec *) *g,
+ (struct rlp_spec **) g,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ } else if (IS_ELEM(*g)) {
+ struct dt_elem *clone;
+ stat = elem_clone((struct dt_elem *) *g, &clone);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_new((struct gen_spec *) clone,
+ newspec_set);
+ if (stat < 0) {
+ elem_free(clone);
+ return stat;
+ }
+ *g = (struct gen_spec *) clone;
+ }
+ return HE_OK;
+}
+
+/* clone spec including the elementary interval layers recursively and call
+ history_new for each clone;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static hipac_error
+rlp_clone_rec(const struct rlp_spec *spec, struct rlp_spec **clone,
+ int newspec_set)
+{
+ struct gen_spec **nextspec = NULL;
+ __u32 size;
+ int stat;
+ __u16 n;
+
+ if (unlikely(spec == NULL || clone == NULL)) {
+ ARG_ERR;
+ }
+
+ size = rlp_size(spec);
+ *clone = hp_alloc(size, 1);
+ if (*clone == NULL) {
+ return HE_LOW_MEMORY;
+ }
+
+ memcpy(*clone, spec, size);
+ stat = ptrblock_clone(*termrule(spec), termrule(*clone));
+ if (stat < 0) {
+ hp_free(*clone);
+ return stat;
+ }
+
+ stat = history_new((struct gen_spec *) *clone, newspec_set);
+ if (stat < 0) {
+ hp_free(*termrule(*clone));
+ hp_free(*clone);
+ return stat;
+ }
+
+ nextspec = rlp_nextspec(*clone);
+ assert(nextspec != NULL);
+
+ for (n = 0; n < (*clone)->num; n++) {
+ stat = rlp_clone_help(nextspec + n, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+
+ if (HAS_WILDCARD_SPEC(*clone)) {
+ stat = rlp_clone_help(WILDCARD(*clone), newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+/* necessary forward declaration */
+static hipac_error
+rlp_free_rec(struct rlp_spec *spec, int newspec_set, int direct_free);
+
+static inline hipac_error
+rlp_free_help(struct gen_spec *g, int newspec_set, int direct_free)
+{
+ int stat;
+
+ if (g == NULL) {
+ return HE_OK;
+ }
+ if (IS_RLP(g)) {
+ stat = rlp_free_rec((struct rlp_spec *) g, newspec_set,
+ direct_free);
+ if (stat < 0) {
+ return stat;
+ }
+ } else if (IS_ELEM(g)) {
+ if (direct_free) {
+ rlp_elem_free(g);
+ } else {
+ stat = history_obsolete(g, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ }
+ return HE_OK;
+}
+
+/* 'free' spec including the elementary interval layers recursively;
+ if direct_free is 0 'free' means to call history_obsolete for each element;
+ otherwise the elements are directly freed by rlp_elem_free;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static hipac_error
+rlp_free_rec(struct rlp_spec *spec, int newspec_set, int direct_free)
+{
+ struct gen_spec **nextspec = NULL;
+ int stat;
+ __u16 n;
+
+ if (unlikely(spec == NULL)) {
+ ARG_ERR;
+ }
+
+ nextspec = rlp_nextspec(spec);
+ assert(nextspec != NULL);
+
+ for (n = 0; n < spec->num; n++) {
+ stat = rlp_free_help(*(nextspec + n), newspec_set,
+ direct_free);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+
+ if (HAS_WILDCARD_SPEC(spec)) {
+ stat = rlp_free_help(*WILDCARD(spec), newspec_set,
+ direct_free);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+
+ if (direct_free) {
+ rlp_elem_free((struct gen_spec *) spec);
+ return HE_OK;
+ }
+ return history_obsolete((struct gen_spec *) spec, newspec_set);
+}
+
+/* return 1 if spec1 and spec2 are recursively equal; the headers spec1 and
+ spec2 are assumed to be equal */
+static int
+rlp_eq_rec(const struct rlp_spec *spec1, const struct rlp_spec *spec2)
+{
+ struct gen_spec **nextspec1 = NULL, **nextspec2 = NULL;
+ __u16 n;
+
+ if (unlikely(spec1 == NULL || spec2 == NULL)) {
+ ARG_ERR;
+ }
+
+ assert(IS_RLP(spec1));
+ assert(IS_RLP(spec2));
+ assert(rlp_spec_eq(spec1, spec2));
+
+ if (!ptrblock_eq(*termrule(spec1), *termrule(spec2))) {
+ return 0;
+ }
+ nextspec1 = rlp_nextspec(spec1);
+ assert(nextspec1 != NULL);
+ nextspec2 = rlp_nextspec(spec2);
+ assert(nextspec2 != NULL);
+
+ /* we don't need to compare the keys of spec1 and spec2 because for
+ each corresponding rlp pair the termrule blocks are compared
+ which means that if rlp_eq_rec finally returns 1 the same rules
+ terminate in the subtree rooted by the top level rlp spec1 and in
+ the subtree rooted by the top level rlp spec2; since all leaves
+ of the subtrees are terminal (NULL, rule or elementary interval
+ layer) we can conclude that there is no other rule except those in
+ the termrule blocks that have created keys in the rlps */
+ for (n = 0; n < spec1->num; n++) {
+ if (!rlp_rule_elem_eq(*(nextspec1 + n), *(nextspec2 + n))) {
+ return 0;
+ }
+ }
+
+ if (HAS_WILDCARD_SPEC(spec1) &&
+ !rlp_rule_elem_eq(*WILDCARD(spec1), *WILDCARD(spec2))) {
+ return 0;
+ }
+ return 1;
+}
+
+
+
+/*
+ * internal dimtree operations
+ */
+
+static inline hipac_error
+rlp_clone_ifneeded(struct rlp_spec *b, struct rlp_spec **newb,
+ int newspec_set)
+{
+ int stat;
+
+ if (unlikely(b == NULL || newb == NULL)) {
+ ARG_ERR;
+ }
+
+ if (b->newspec == 0) {
+ /* we must clone because b is visible for packet matching */
+ stat = rlp_clone(b, newb);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace((struct gen_spec *) b,
+ (struct gen_spec *) *newb, newspec_set);
+ if (stat < 0) {
+ rlp_free(*newb);
+ return stat;
+ }
+ } else {
+ /* b can be modified directly */
+ *newb = b;
+ }
+ return HE_OK;
+}
+
+static inline hipac_error
+elem_clone_ifneeded(struct dt_elem *e, struct dt_elem **newe,
+ int newspec_set)
+{
+ int stat;
+
+ if (unlikely(e == NULL || newe == NULL)) {
+ ARG_ERR;
+ }
+
+ if (e->spec.newspec == 0) {
+ /* we must clone because e is visible for packet matching */
+ stat = elem_clone(e, newe);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace((struct gen_spec *) e,
+ (struct gen_spec *) *newe, newspec_set);
+ if (stat < 0) {
+ elem_free(*newe);
+ return stat;
+ }
+ } else {
+ /* e can be modified directly */
+ *newe = e;
+ }
+ return HE_OK;
+}
+
+#ifdef DEBUG
+static void
+print_elem(struct dt_elem *e)
+{
+ int i;
+
+ DPRINT(DEBUG_DIMTREE, "term_rule: %p, ntm_rules:", e->term_rule);
+ if (e->ntm_rules.len == 0) {
+ DPRINT(DEBUG_DIMTREE, " => BUG");
+ return;
+ }
+ for (i = 0; i < e->ntm_rules.len; i++) {
+ DPRINT(DEBUG_DIMTREE, " %p", e->ntm_rules.p[i]);
+ }
+}
+
+static void
+print_rlp(struct rlp_spec *rlp)
+{
+ __u32 key = 0;
+ struct locate_inf inf;
+ int i;
+
+ if (rlp == NULL) {
+ DPRINT(DEBUG_DIMTREE, "rlp: %p (this might not be what you "
+ "expected)\n", rlp);
+ return;
+ }
+ if (!IS_RLP(rlp)) {
+ DPRINT(DEBUG_DIMTREE, "rlp: %p is __NOT__ a rlp => FATAL "
+ "ERROR\n", rlp);
+ return;
+ }
+ DPRINT(DEBUG_DIMTREE, "rlp: %p - bittype: %d, dimid: %d, "
+ "newspec: %d, num: %d\n", rlp, rlp->bittype, rlp->dimid,
+ rlp->newspec, rlp->num);
+ DPRINT(DEBUG_DIMTREE, " content:");
+ if (HAS_WILDCARD_DIM(rlp->dimid)) {
+ if (*WILDCARD(rlp) != NULL && IS_RULE(*WILDCARD(rlp))) {
+ DPRINT(DEBUG_DIMTREE, " (wc, %p: rule)",
+ *WILDCARD(rlp));
+ } else if (*WILDCARD(rlp) != NULL && IS_ELEM(*WILDCARD(rlp))) {
+ DPRINT(DEBUG_DIMTREE, " (wc, %p: ", *WILDCARD(rlp));
+ print_elem((struct dt_elem *) *WILDCARD(rlp));
+ DPRINT(DEBUG_DIMTREE, ")");
+ } else {
+ DPRINT(DEBUG_DIMTREE, " (wc, %p)", *WILDCARD(rlp));
+ }
+ }
+ do {
+ if (rlp_locate(rlp, &inf, key) < 0) {
+ DPRINT(DEBUG_DIMTREE, "\n%s: no memory for locate "
+ "info\n", __FUNCTION__);
+ return;
+ }
+ if (*inf.nextspec != NULL && IS_RULE(*inf.nextspec)) {
+ DPRINT(DEBUG_DIMTREE, " (%u, %p: rule)", inf.key,
+ *inf.nextspec);
+ } else if (*inf.nextspec != NULL &&
+ IS_ELEM(*inf.nextspec)) {
+ DPRINT(DEBUG_DIMTREE, " (%u, %p: ", inf.key,
+ *inf.nextspec);
+ print_elem((struct dt_elem *) *inf.nextspec);
+ DPRINT(DEBUG_DIMTREE, ")");
+ } else {
+ DPRINT(DEBUG_DIMTREE, " (%u, %p)", inf.key,
+ *inf.nextspec);
+ }
+ key = inf.key + 1;
+ } while (inf.key < MAXKEY(dim2btype[rlp->dimid]));
+ DPRINT(DEBUG_DIMTREE, "\n term:");
+ if (*termrule(rlp) == NULL) {
+ DPRINT(DEBUG_DIMTREE, " \n");
+ } else {
+ for (i = 0; i < (*termrule(rlp))->len; i++) {
+ DPRINT(DEBUG_DIMTREE, " %p", (*termrule(rlp))->p[i]);
+ }
+ DPRINT(DEBUG_DIMTREE, "\n");
+ }
+}
+#endif
+
+static inline hipac_error
+segment_insert_help(struct locate_inf *inf, __u8 *ins_num,
+ struct gen_spec* new_nextspec[], int newspec_set)
+{
+ int stat;
+
+ if (*inf->nextspec == NULL || IS_RULE(*inf->nextspec)) {
+ new_nextspec[*ins_num] = *inf->nextspec;
+ } else if (IS_ELEM(*inf->nextspec)) {
+ struct dt_elem *e;
+ stat = elem_clone((struct dt_elem *) *inf->nextspec, &e);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_new((struct gen_spec *) e, newspec_set);
+ if (stat < 0) {
+ elem_free(e);
+ return stat;
+ }
+ new_nextspec[*ins_num] = (struct gen_spec *) e;
+ } else {
+ assert(IS_RLP(*inf->nextspec));
+ stat = rlp_clone_rec(
+ (struct rlp_spec *) *inf->nextspec,
+ (struct rlp_spec **) &new_nextspec[*ins_num],
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ (*ins_num)++;
+ return HE_OK;
+}
+
+/* segment [left, right] is inserted into spec which causes at most two new
+ elementary intervals being created; for every new elementary interval
+ the neighbour interval is cloned recursively */
+static inline hipac_error
+segment_insert(struct rlp_spec **spec, __u32 left, __u32 right,
+ int newspec_set)
+{
+ __u8 ins_num = 0;
+ struct gen_spec* new_nextspec[2];
+ struct locate_inf inf;
+ __u32 new_key[2];
+ int stat;
+
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: left: %u, right: %u, newspec_set: %d\n",
+ __FUNCTION__, left, right, newspec_set);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ if (left > 0) {
+ stat = rlp_locate(*spec, &inf, left - 1);
+ if (stat < 0) {
+ return stat;
+ }
+ if (inf.key != left - 1) {
+ new_key[ins_num] = left - 1;
+ stat = segment_insert_help(&inf, &ins_num,
+ new_nextspec, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ }
+
+ stat = rlp_locate(*spec, &inf, right);
+ if (stat < 0) {
+ return stat;
+ }
+ if (inf.key != right) {
+ new_key[ins_num] = right;
+ stat = segment_insert_help(&inf, &ins_num, new_nextspec,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+
+ if (ins_num > 0) {
+ struct rlp_spec *b;
+ assert(ins_num == 1 || new_key[0] != new_key[1]);
+ if (ins_num == 1) {
+ DPRINT(DEBUG_DIMTREE, "new key: %u\n", new_key[0]);
+ } else {
+ DPRINT(DEBUG_DIMTREE, "new keys: %u, %u\n", new_key[0],
+ new_key[1]);
+ }
+ stat = rlp_insert(*spec, ins_num, new_key, new_nextspec, &b);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace((struct gen_spec *) *spec,
+ (struct gen_spec *) b, newspec_set);
+ if (stat < 0) {
+ rlp_free(b);
+ return stat;
+ }
+ *spec = b;
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ } else {
+ /* we clone the rlp anyway if necessary */
+ struct rlp_spec *b;
+ stat = rlp_clone_ifneeded(*spec, &b, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *spec = b;
+ }
+ return HE_OK;
+}
+
+/* forward declaration */
+static hipac_error
+dimtree_insrec(struct rlp_spec **spec, struct dt_rule *rule,
+ __u8 match_num, int newspec_set);
+
+static hipac_error
+dimtree_insrec_null(struct rlp_spec **spec, struct dt_rule *rule,
+ __u8 match_num, int newspec_set)
+{
+ const struct dt_match *match = ITH_DT_MATCH(rule, match_num);
+ __u8 bittype = dim2btype[match->dimid];
+ struct gen_spec *nextspec[] = {NULL};
+ __u32 key = MAXKEY(bittype);
+ struct locate_inf inf;
+ int stat;
+
+ /* create new rlp that defaults to policy and insert match
+ recursively */
+ assert(spec != NULL && *spec == NULL);
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, match: "
+ "(dimid: %d, left: %u, right: %u)\n", __FUNCTION__, match_num,
+ newspec_set, match->dimid, match->left, match->right);
+ DPRINT(DEBUG_DIMTREE, "%s: new rlp: bittype: %d, dimid: %d, key: "
+ "%u, nextspec: %p\n", __FUNCTION__, bittype, match->dimid, key,
+ *nextspec);
+ *spec = rlp_new(bittype, match->dimid, 1, &key, nextspec);
+ if (*spec == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ stat = history_new((struct gen_spec *) *spec, newspec_set);
+ if (stat < 0) {
+ rlp_free(*spec);
+ return stat;
+ }
+
+ /* match must be non-wildcard */
+ assert(match->left > 0 || match->right < MAXKEY((*spec)->bittype));
+ stat = segment_insert(spec, match->left, match->right, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = rlp_locate(*spec, &inf, match->right);
+ if (stat < 0) {
+ return stat;
+ }
+ if (match_num == rule->dt_match_len - 1) {
+ /* final match of rule -> insert rule into termrule block */
+ struct ptrblock **term = termrule(*spec);
+ stat = termrule_insert(term, rule);
+ if (stat < 0) {
+ return stat;
+ }
+ *inf.nextspec = (struct gen_spec *) rule;
+ } else {
+ /* before final match -> insert next match by recursion */
+ stat = dimtree_insrec_null((struct rlp_spec **)
+ inf.nextspec, rule, match_num + 1,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_insrec_rule_elem(struct dt_rule_elem_spec **spec, struct dt_rule *rule,
+ __u8 match_num, struct ptrblock *term_prop,
+ int newspec_set)
+{
+ struct dt_match *match = ITH_DT_MATCH(rule, match_num);
+ __u8 bittype = dim2btype[match->dimid];
+ __u32 key = MAXKEY(bittype);
+ struct gen_spec *nextspec[1];
+ struct rlp_spec *newspec;
+ struct ptrblock **term;
+ struct locate_inf inf;
+ int stat;
+
+ assert(spec != NULL);
+ assert(*spec != NULL);
+ assert(IS_RULE(*spec) || IS_ELEM(*spec));
+ assert(match->left > 0 || match->right < MAXKEY(bittype));
+
+ /* create new rlp and insert match recursively; term_prop propagates
+ through all dimension while remaining in each dimension as
+ termrule block; if anything goes wrong before term_prop is
+ attached to newspec term_prop will be freed; later it is treated
+ by the history */
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, match: "
+ "(dimid: %d, left: %u, right: %u)\n", __FUNCTION__, match_num,
+ newspec_set, match->dimid, match->left, match->right);
+ if (HAS_WILDCARD_DIM(match->dimid)) {
+ nextspec[0] = NULL;
+ DPRINT(DEBUG_DIMTREE, "%s: new rlp: bittype: %d, dimid: %d,"
+ " key: %u, nextspec: %p\n", __FUNCTION__, bittype,
+ match->dimid, key, *nextspec);
+ newspec = rlp_new(bittype, match->dimid, 1, &key, nextspec);
+ if (newspec == NULL) {
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ return HE_LOW_MEMORY;
+ }
+ *WILDCARD(newspec) = (struct gen_spec *) *spec;
+ } else {
+ nextspec[0] = (struct gen_spec *) *spec;
+ DPRINT(DEBUG_DIMTREE, "%s: new rlp: bittype: %d, dimid: %d,"
+ " key: %u, nextspec: %p\n", __FUNCTION__, bittype,
+ match->dimid, key, *nextspec);
+ newspec = rlp_new(bittype, match->dimid, 1, &key, nextspec);
+ if (newspec == NULL) {
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ return HE_LOW_MEMORY;
+ }
+ }
+ stat = history_new((struct gen_spec *) newspec, newspec_set);
+ if (stat < 0) {
+ rlp_free(newspec);
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ return stat;
+ }
+ stat = segment_insert(&newspec, match->left, match->right,
+ newspec_set);
+ if (stat < 0) {
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ return stat;
+ }
+ /* attach term_prop to newspec -> if anything goes wrong from now on
+ term_prop must not be freed here */
+ term = termrule(newspec);
+ *term = term_prop;
+ stat = rlp_locate(newspec, &inf, match->right);
+ if (stat < 0) {
+ return stat;
+ }
+
+ if (match_num == rule->dt_match_len - 1) {
+ /* final match of rule -> insert rule into termrule block */
+ stat = termrule_insert(term, rule);
+ if (stat < 0) {
+ return stat;
+ }
+ if (HAS_WILDCARD_DIM(match->dimid)) {
+ assert(*inf.nextspec == NULL);
+ *inf.nextspec = (struct gen_spec *) rule;
+ } else {
+ stat = rule_elem_insert((struct dt_rule_elem_spec **)
+ inf.nextspec, rule,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ } else {
+ /* before final match -> insert next match by recursion */
+ if (*inf.nextspec == NULL) {
+ stat = dimtree_insrec_null((struct rlp_spec **)
+ inf.nextspec, rule,
+ match_num + 1, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ } else {
+ struct ptrblock *term_prop_clone = NULL;
+ if (term_prop != NULL) {
+ stat = ptrblock_clone(term_prop,
+ &term_prop_clone);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ stat = dimtree_insrec_rule_elem(
+ (struct dt_rule_elem_spec **) inf.nextspec,
+ rule, match_num + 1, term_prop_clone,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ }
+ /* newspec is a rlp (not struct dt_rule_elem_spec *); the cast is
+ anyway necessary because of spec */
+ *spec = (struct dt_rule_elem_spec *) newspec;
+ return HE_OK;
+}
+
+static inline hipac_error
+dimtree_insrec_curdimid_sm_help(struct rlp_spec *spec, struct gen_spec **g,
+ struct dt_rule *rule, __u8 match_num,
+ __u32 right, __u8 wildcard, int newspec_set,
+ int do_cut)
+{
+ int stat;
+
+ if (*g == NULL) {
+ /* insert rule into policy interval */
+ stat = dimtree_insrec_null((struct rlp_spec **) g, rule,
+ match_num, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ } else if (IS_RLP(*g)) {
+ /* non-terminal case */
+ struct rlp_spec *b = (struct rlp_spec *) *g;
+
+ /* we don't have to clone if dimtree_insrec_curdimid_eq is
+ called by dimtree_insrec because segment_insert clones
+ the rlp anyway if necessary */
+ if ((b->dimid != ITH_DT_MATCH(rule, match_num)->dimid)) {
+ stat = rlp_clone_ifneeded(b, &b, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ stat = dimtree_insrec(&b, rule, match_num, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *g = (struct gen_spec *) b;
+ } else {
+ /* the rules that terminate in g will propagate to termrule
+ blocks in below dimensions */
+ struct dt_rule_elem_spec **re;
+ struct ptrblock *term_prop;
+ assert(IS_ELEM(*g) || IS_RULE(*g));
+ stat = termrule_subset(*termrule(spec), &term_prop, right,
+ wildcard, spec->dimid);
+ if (stat < 0) {
+ return stat;
+ }
+ if (do_cut && *termrule(spec) != NULL && term_prop != NULL) {
+ /* remove all rules in term_prop from current
+ termrule block */
+ stat = termrule_cut(termrule(spec), term_prop);
+ if (stat < 0) {
+ ptrblock_free(term_prop);
+ return stat;
+ }
+ }
+
+ re = (struct dt_rule_elem_spec **) g;
+ if (IS_ELEM(*re)) {
+ struct dt_elem *e;
+ stat = elem_clone_ifneeded((struct dt_elem *) *re, &e,
+ newspec_set);
+ if (stat < 0) {
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ return stat;
+ }
+ *re = (struct dt_rule_elem_spec *) e;
+ }
+ stat = dimtree_insrec_rule_elem(re, rule, match_num,
+ term_prop, newspec_set);
+ if (stat < 0) {
+ /* term_prop was freed by
+ dimtree_insrec_rule_elem */
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_insrec_curdimid_sm(struct rlp_spec **spec, struct dt_rule *rule,
+ __u8 match_num, int newspec_set)
+{
+ __u32 key = 0;
+ __u32 maxkey = MAXKEY((*spec)->bittype);
+ struct locate_inf inf;
+ int stat;
+
+ assert(spec != NULL);
+ assert(*spec != NULL);
+ assert(IS_RLP(*spec));
+ assert(match_num < rule->dt_match_len);
+ /* insert it into every elementary interval respectively the wildcard
+ pointer */
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d\n",
+ __FUNCTION__, match_num, newspec_set);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ if (HAS_WILDCARD_SPEC(*spec)) {
+ return dimtree_insrec_curdimid_sm_help(
+ *spec, WILDCARD(*spec), rule, match_num, 0, 1,
+ newspec_set, 1);
+ }
+
+ do {
+ stat = rlp_locate(*spec, &inf, key);
+ if (stat < 0) {
+ return stat;
+ }
+ key = inf.key + 1;
+ stat = dimtree_insrec_curdimid_sm_help(
+ *spec, inf.nextspec, rule, match_num, inf.key, 0,
+ newspec_set, 0);
+ if (stat < 0) {
+ return stat;
+ }
+ } while (inf.key < maxkey);
+
+ if (*termrule(*spec) != NULL) {
+ /* by inserting rule into every elementary interval the
+ dimension becomes completely nonterminating */
+ ptrblock_free(*termrule(*spec));
+ *termrule(*spec) = NULL;
+ }
+ return HE_OK;
+}
+
+/* necessary forward declaration */
+static hipac_error
+dimtree_insrec_curdimid_eq_tm(struct rlp_spec **spec, struct dt_rule *rule,
+ __u32 left, __u32 right, int newspec_set);
+
+static inline hipac_error
+dimtree_insrec_curdimid_eq_tm_help(struct gen_spec **g, struct dt_rule *rule,
+ struct ptrblock **term, __u8 *ins_termrule,
+ int newspec_set)
+{
+ int stat;
+
+ if (*g != NULL && IS_RLP(*g)) {
+ /* non-terminal case */
+ struct rlp_spec *b;
+
+ stat = rlp_clone_ifneeded((struct rlp_spec *) *g, &b,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = dimtree_insrec_curdimid_eq_tm(
+ &b, rule, 0, MAXKEY(b->bittype), newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *g = (struct gen_spec *) b;
+ } else {
+ /* beyond final match of rule -> insert rule into
+ termrule block if not already inserted */
+ struct dt_rule_elem_spec **re;
+ if (*ins_termrule) {
+ stat = termrule_insert(term, rule);
+ if (stat < 0) {
+ return stat;
+ }
+ *ins_termrule = 0;
+ }
+
+ re = (struct dt_rule_elem_spec **) g;
+ if (*re != NULL && IS_ELEM(*re)) {
+ struct dt_elem *e;
+ stat = elem_clone_ifneeded((struct dt_elem *) *re, &e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *re = (struct dt_rule_elem_spec *) e;
+ }
+ stat = rule_elem_insert(re, rule, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_insrec_curdimid_eq_tm(struct rlp_spec **spec, struct dt_rule *rule,
+ __u32 left, __u32 right, int newspec_set)
+{
+ __u8 ins_termrule = 1;
+ struct ptrblock **term = termrule(*spec);
+ struct locate_inf inf;
+ __u32 key = left;
+ int stat;
+
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: left: %u, right: %u, newspec_set: %d\n",
+ __FUNCTION__, left, right, newspec_set);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ if (HAS_WILDCARD_SPEC(*spec) && left == 0 &&
+ right == MAXKEY((*spec)->bittype)) {
+ /* insert wildcard match into wildcard dimension */
+ return dimtree_insrec_curdimid_eq_tm_help(
+ WILDCARD(*spec), rule, term, &ins_termrule,
+ newspec_set);
+ }
+
+ /* iterate over every elementary interval between left and right
+ and check if rule is better than the current or recurse if
+ elementary interval is non-terminating */
+ do {
+ stat = rlp_locate(*spec, &inf, key);
+ if (stat < 0) {
+ return stat;
+ }
+ key = inf.key + 1;
+ stat = dimtree_insrec_curdimid_eq_tm_help(
+ inf.nextspec, rule, term, &ins_termrule,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ } while (inf.key < right);
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_insrec_curdimid_eq(struct rlp_spec **spec, struct dt_rule *rule,
+ const struct dt_match *match, __u8 match_num,
+ int newspec_set)
+{
+ __u32 key = match->left;
+ struct locate_inf inf;
+ int stat;
+
+ /* match must be non-wildcard */
+ assert(match->left > 0 || match->right < MAXKEY((*spec)->bittype));
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, match: "
+ "(dimid: %d, left: %u, right: %u)\n", __FUNCTION__, match_num,
+ newspec_set, match->dimid, match->left, match->right);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ stat = segment_insert(spec, match->left, match->right, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+
+ /* insert match and iterate over every overlapped interval */
+ if (match_num == rule->dt_match_len - 1) {
+ /* final match of rule */
+ stat = dimtree_insrec_curdimid_eq_tm(
+ spec, rule, match->left, match->right, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ } else {
+ /* before final match of rule */
+ do {
+ stat = rlp_locate(*spec, &inf, key);
+ if (stat < 0) {
+ return stat;
+ }
+ key = inf.key + 1;
+
+ if (*inf.nextspec == NULL) {
+ /* insert rule into policy interval */
+ stat = dimtree_insrec_null(
+ (struct rlp_spec **) inf.nextspec,
+ rule, match_num + 1, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ } else if (IS_RLP(*inf.nextspec)) {
+ /* non-terminal case */
+ struct rlp_spec *b = (struct rlp_spec *)
+ *inf.nextspec;
+
+ /* we don't have to clone if
+ dimtree_insrec_curdimid_eq is called by
+ dimtree_insrec because segment_insert
+ clones the rlp anyway if necessary */
+ if (b->dimid !=
+ ITH_DT_MATCH(rule, match_num + 1)->dimid) {
+ stat = rlp_clone_ifneeded(
+ (struct rlp_spec *)
+ *inf.nextspec, &b,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ stat = dimtree_insrec(
+ &b, rule, match_num + 1, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *inf.nextspec = (struct gen_spec *) b;
+ } else {
+ /* the rules that terminate in the current
+ elementary interval will propagate to
+				   termrule blocks of the dimensions below */
+ struct dt_rule_elem_spec **re;
+ struct ptrblock *term_prop;
+ stat = termrule_subset(
+ *termrule(*spec), &term_prop, inf.key,
+ 0, (*spec)->dimid);
+ if (stat < 0) {
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ return stat;
+ }
+ re = (struct dt_rule_elem_spec **)
+ inf.nextspec;
+ if (IS_ELEM(*re)) {
+ struct dt_elem *e;
+ stat = elem_clone_ifneeded(
+ (struct dt_elem *) *re, &e,
+ newspec_set);
+ if (stat < 0) {
+ if (term_prop != NULL) {
+ ptrblock_free(
+ term_prop);
+ }
+ return stat;
+ }
+ *re = (struct dt_rule_elem_spec *) e;
+ }
+ stat = dimtree_insrec_rule_elem(
+ re, rule, match_num + 1, term_prop,
+ newspec_set);
+ if (stat < 0) {
+ /* term_prop was freed by
+ dimtree_insrec_rule_elem */
+ return stat;
+ }
+ }
+ } while (inf.key < match->right);
+
+ /* as the rule continues we can be sure that every terminating
+ rule whose match in the current dimension is completely
+ overlapped by match can be removed from the termrule block;
+		   we may fail to remove rules with partially overlapped
+		   matches, but this does NOT cause any harm and the case
+		   should be very rare */
+ stat = termrule_delete_ovl(termrule(*spec), match->left,
+ match->right, (*spec)->dimid);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_insrec_curdimid_gr(struct rlp_spec **spec, struct dt_rule *rule,
+ const struct dt_match *match, __u8 match_num,
+ int newspec_set)
+{
+ __u8 bittype = dim2btype[match->dimid];
+ __u32 key = MAXKEY(bittype);
+ struct gen_spec *nextspec[1];
+ struct rlp_spec *newspec;
+ int stat;
+
+ /* create missing dimension and insert current match by recursion */
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, match: "
+ "(dimid: %d, left: %u, right: %u)\n", __FUNCTION__, match_num,
+ newspec_set, match->dimid, match->left, match->right);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ if (HAS_WILDCARD_DIM(match->dimid)) {
+ nextspec[0] = NULL;
+ DPRINT(DEBUG_DIMTREE, "%s: new rlp: bittype: %d, dimid: %d,"
+ " key: %u, nextspec: %p\n", __FUNCTION__, bittype,
+ match->dimid, key, *nextspec);
+ newspec = rlp_new(bittype, match->dimid, 1, &key, nextspec);
+ if (newspec == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ *WILDCARD(newspec) = (struct gen_spec *) *spec;
+ } else {
+ nextspec[0] = (struct gen_spec *) *spec;
+ DPRINT(DEBUG_DIMTREE, "%s: new rlp: bittype: %d, dimid: %d,"
+ " key: %u, nextspec: %p\n", __FUNCTION__, bittype,
+ match->dimid, key, *nextspec);
+ newspec = rlp_new(bittype, match->dimid, 1, &key, nextspec);
+ if (newspec == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ }
+ stat = history_new((struct gen_spec *) newspec, newspec_set);
+ if (stat < 0) {
+ rlp_free(newspec);
+ return stat;
+ }
+ *spec = newspec;
+ return dimtree_insrec(spec, rule, match_num, newspec_set);
+}
+
+static hipac_error
+dimtree_insrec(struct rlp_spec **spec, struct dt_rule *rule,
+ __u8 match_num, int newspec_set)
+{
+ struct dt_match *match;
+
+ /* spec non-terminating */
+ assert(spec != NULL);
+ assert(*spec != NULL);
+ assert(IS_RLP(*spec));
+
+ /* rule is not finished yet */
+ assert(match_num < rule->dt_match_len);
+
+ match = ITH_DT_MATCH(rule, match_num);
+
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, match: "
+ "(dimid: %d, left: %u, right: %u)\n", __FUNCTION__, match_num,
+ newspec_set, match->dimid, match->left, match->right);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ if ((*spec)->dimid < match->dimid) {
+		/* the rule has no match for the current dimension, so it is
+		   treated as a wildcard here */
+ return dimtree_insrec_curdimid_sm(spec, rule, match_num,
+ newspec_set);
+ } else if ((*spec)->dimid == match->dimid) {
+		/* there is a match in the current dimension, which by
+		   construction is not a wildcard */
+ return dimtree_insrec_curdimid_eq(spec, rule, match,
+ match_num, newspec_set);
+
+ } else {
+ /* the dimension of the current match has not yet been
+ created */
+ return dimtree_insrec_curdimid_gr(spec, rule, match,
+ match_num, newspec_set);
+ }
+}
+
+static inline hipac_error
+segment_delete_help(struct rlp_spec *spec, struct locate_inf *bound1,
+ __u32 lkey, __u32 dkey, __u32 del_key[], __u8 *del_num,
+ int newspec_set)
+{
+ struct gen_spec *current1, *current2;
+ struct locate_inf bound2;
+ int stat;
+
+ stat = rlp_locate(spec, &bound2, lkey);
+ if (stat < 0) {
+ return stat;
+ }
+ current1 = *bound1->nextspec;
+ current2 = *bound2.nextspec;
+ switch (rlp_rule_elem_eq(current1, current2)) {
+ case 1:
+ if (current1 == NULL ||
+ !termrule_exists(*termrule(spec), spec->dimid, dkey)) {
+ del_key[(*del_num)++] = dkey;
+ }
+ break;
+ case 2:
+ if (!termrule_exists(*termrule(spec), spec->dimid, dkey)) {
+ history_obsolete(current1, newspec_set);
+ del_key[(*del_num)++] = dkey;
+ }
+ break;
+ case 3:
+ del_key[(*del_num)++] = dkey;
+ stat = rlp_free_rec((struct rlp_spec *) current1,
+ newspec_set, 0);
+ if (stat < 0) {
+ return stat;
+ }
+ break;
+ default:
+ break;
+ }
+ return HE_OK;
+}
+
+/* segment [left, right] is deleted from spec if the neighbours of left and
+ right point to the same spec; at most two elementary intervals can be
+ deleted */
+static inline hipac_error
+segment_delete(struct rlp_spec **spec, __u32 left, __u32 right,
+ int newspec_set)
+{
+ __u8 del_num = 0;
+ __u32 maxkey = MAXKEY((*spec)->bittype);
+ __u32 del_key[2] = {0, 0};
+ struct locate_inf bound1;
+ int stat;
+
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: left: %u, right: %u, newspec_set: %d\n",
+ __FUNCTION__, left, right, newspec_set);
+#ifdef DEBUG
+ print_rlp(*spec);
+#endif
+ if (left > 0) {
+ stat = rlp_locate(*spec, &bound1, left - 1);
+ if (stat < 0) {
+ return stat;
+ }
+ assert(bound1.key == left - 1);
+ stat = segment_delete_help(*spec, &bound1, left, left - 1,
+ del_key, &del_num, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+
+ if (right < maxkey) {
+ stat = rlp_locate(*spec, &bound1, right);
+ if (stat < 0) {
+ return stat;
+ }
+ assert(bound1.key == right);
+ stat = segment_delete_help(*spec, &bound1, right + 1, right,
+ del_key, &del_num, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+
+ if (del_num > 0) {
+ struct rlp_spec *b;
+ assert(del_num == 1 || del_key[0] < del_key[1]);
+ if (del_num == 1) {
+ DPRINT(DEBUG_DIMTREE, "del key: %u\n", del_key[0]);
+ } else {
+ DPRINT(DEBUG_DIMTREE, "del keys: %u, %u\n",
+ del_key[0], del_key[1]);
+ }
+ stat = rlp_delete(*spec, del_num, del_key, &b);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace((struct gen_spec *) *spec,
+ (struct gen_spec *) b, newspec_set);
+ if (stat < 0) {
+ rlp_free(b);
+ return stat;
+ }
+ *spec = b;
+ }
+ return HE_OK;
+}
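
segment_delete above drops the boundary keys left - 1 and right again once the elementary intervals on either side of such a boundary have become equivalent, so that neighbouring intervals merge back into one. The small self-contained sketch below shows that merge test on a plain key/value table; it reduces the cases of rlp_rule_elem_eq to simple pointer equality and ignores the termrule and history bookkeeping, and all *_demo names are invented for illustration.

#include <stdio.h>

/* simplified elementary-interval table: keys[i] is the right bound of
   interval i, vals[i] is what the interval points to (stands in for
   *nextspec in the patch) */
struct ival_table_demo {
	unsigned int keys[8];
	const void *vals[8];
	unsigned int num;
};

/* drop the boundary key at index i by merging interval i into interval
   i + 1; returns 1 if a merge happened (simplified rlp_delete) */
static int merge_if_equal_demo(struct ival_table_demo *t, unsigned int i)
{
	unsigned int j;

	if (i + 1 >= t->num || t->vals[i] != t->vals[i + 1])
		return 0;
	for (j = i; j + 1 < t->num; j++) {
		t->keys[j] = t->keys[j + 1];
		t->vals[j] = t->vals[j + 1];
	}
	t->num--;
	return 1;
}

int main(void)
{
	static const char *a = "A", *b = "B";
	struct ival_table_demo t = {
		.keys = {9, 99, 199, 65535},
		.vals = {&a, &b, &b, &a},
		.num  = 4,
	};
	unsigned int i;

	/* [100,199] and [200,65535] differ, so nothing happens; [10,99]
	   and [100,199] agree, so boundary key 99 is dropped */
	printf("merge at 199: %d\n", merge_if_equal_demo(&t, 2));
	printf("merge at 99:  %d\n", merge_if_equal_demo(&t, 1));
	printf("remaining boundaries:");
	for (i = 0; i < t.num; i++)
		printf(" %u", t.keys[i]);
	printf("\n");
	return 0;
}
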
+
+/* forward declaration needed for dimtree_delrec_interval */
+static hipac_error
+dimtree_delrec(struct rlp_spec **spec, const struct dt_rule *rule,
+ __u8 match_num, struct ptrlist *term_prop, int newspec_set);
+
+static inline hipac_error
+dimtree_delrec_interval(struct gen_spec **spec, const struct dt_rule *rule,
+ __u8 match_num, struct ptrlist *tmpterm,
+ struct ptrblock **term, __u32 right, __u8 wildcard,
+ __u8 dimid, int newspec_set)
+{
+ int stat;
+
+ assert(*spec != NULL);
+ if (IS_RLP(*spec)) {
+ /* non-terminal case */
+ struct rlp_spec *b;
+
+ stat = rlp_clone_ifneeded((struct rlp_spec *) *spec, &b,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = dimtree_delrec(&b, rule, match_num, tmpterm,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *spec = (struct gen_spec *) b;
+ } else {
+ struct dt_rule_elem_spec **re =
+ (struct dt_rule_elem_spec **) spec;
+
+ if (IS_ELEM(*re)) {
+ struct dt_elem *e;
+ stat = elem_clone_ifneeded((struct dt_elem *) *re, &e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *re = (struct dt_rule_elem_spec *) e;
+ }
+ stat = rule_elem_delete(re, rule, *term, right, wildcard,
+ dimid, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_delrec(struct rlp_spec **spec, const struct dt_rule *rule,
+ __u8 match_num, struct ptrlist *term_prop, int newspec_set)
+{
+ /* current match is initialized as wildcard */
+ __u32 left = 0;
+ __u32 key = 0;
+ __u32 maxkey = MAXKEY((*spec)->bittype);
+ __u8 match_is_wildcard = 1;
+
+	/* collects all terminating specs from the dimension below */
+ struct ptrlist *tmpterm;
+ struct ptrblock **term;
+ struct locate_inf inf;
+ int stat;
+
+#ifdef DEBUG
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ if (match_num < rule->dt_match_len) {
+ const struct dt_match *match = ITH_DT_MATCH(rule, match_num);
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, "
+		       "rule: %p, match: (dimid: %d, left: %u, right: %u)\n",
+ __FUNCTION__, match_num, newspec_set, rule,
+ match->dimid, match->left, match->right);
+ } else {
+ DPRINT(DEBUG_DIMTREE, "%s: match_num: %d, newspec_set: %d, "
+ "rule: %p, match: \n", __FUNCTION__, match_num,
+ newspec_set, rule);
+ }
+ print_rlp(*spec);
+#endif
+ tmpterm = tmp_termrule_new();
+ if (tmpterm == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ term = termrule(*spec);
+
+ /* dimtree_delrec is never called for terminal cases */
+ assert(*spec != NULL);
+ assert(IS_RLP(*spec));
+
+ if (match_num < rule->dt_match_len) {
+ /* rule is not finished yet */
+ const struct dt_match *match = ITH_DT_MATCH(rule, match_num);
+
+ if ((*spec)->dimid == match->dimid) {
+ /* match must be non-wildcard */
+ assert(match->left > 0 ||
+ match->right < MAXKEY((*spec)->bittype));
+ key = left = match->left;
+ maxkey = match->right;
+ match_is_wildcard = 0;
+ match_num++;
+ }
+ }
+
+ if (HAS_WILDCARD_SPEC(*spec) && match_is_wildcard) {
+ assert(*WILDCARD(*spec) != NULL);
+ stat = dimtree_delrec_interval(
+ WILDCARD(*spec), rule, match_num, tmpterm, term,
+ 0, 1, (*spec)->dimid, newspec_set);
+ if (stat < 0) {
+ goto error;
+ }
+ } else {
+ do {
+ stat = rlp_locate(*spec, &inf, key);
+ if (stat < 0) {
+ goto error;
+ }
+ key = inf.key + 1;
+ assert(*inf.nextspec != NULL);
+ stat = dimtree_delrec_interval(
+ inf.nextspec, rule, match_num, tmpterm, term,
+ inf.key, 0, (*spec)->dimid, newspec_set);
+ if (stat < 0) {
+ goto error;
+ }
+ } while (inf.key < maxkey);
+ }
+
+ /* delete rule from termrule block if it is there */
+ stat = termrule_delete(term, rule);
+ if (stat < 0) {
+ goto error;
+ }
+
+ /* merge temporary termrule list with termrule block */
+ stat = termrule_merge(term, tmpterm);
+ if (stat < 0) {
+ return stat;
+ }
+
+ if (!match_is_wildcard) {
+ /* remove surrounding elementary intervals represented by left
+ and maxkey if necessary */
+ stat = segment_delete(spec, left, maxkey, newspec_set);
+ if (stat < 0) {
+ /* tmpterm is already freed */
+ return stat;
+ }
+ term = termrule(*spec);
+ }
+
+ if ((*spec)->num == 1) {
+ /* spec is empty => drop it */
+ struct gen_spec *nextspec;
+
+ if (HAS_WILDCARD_SPEC(*spec)) {
+ assert((stat = rlp_locate(*spec, &inf, 0),
+ stat < 0 ? 1 : *inf.nextspec == NULL));
+ nextspec = *WILDCARD(*spec);
+ } else {
+ stat = rlp_locate(*spec, &inf, 0);
+ if (stat < 0) {
+ /* tmpterm is already freed */
+ return stat;
+ }
+ nextspec = *inf.nextspec;
+ }
+
+ if (*term != NULL && term_prop != NULL) {
+ stat = tmp_termrule_merge(term_prop, *term);
+ if (stat < 0) {
+ /* tmpterm is already freed */
+ return stat;
+ }
+ }
+ stat = history_obsolete((struct gen_spec *) *spec,
+ newspec_set);
+ if (stat < 0) {
+ /* tmpterm is already freed */
+ return stat;
+ }
+
+ if (nextspec == NULL || IS_RULE(nextspec)) {
+ *spec = (struct rlp_spec *) nextspec;
+ } else if (IS_RLP(nextspec)) {
+ struct rlp_spec *b;
+
+ stat = rlp_clone_ifneeded((struct rlp_spec *)
+ nextspec, &b, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *spec = (struct rlp_spec *) b;
+ } else {
+ struct dt_elem *e;
+ assert(IS_ELEM(nextspec));
+ stat = elem_clone_ifneeded((struct dt_elem *)
+ nextspec, &e, newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *spec = (struct rlp_spec *) e;
+ }
+ }
+ return HE_OK;
+
+ error:
+ tmp_termrule_free(tmpterm);
+ return stat;
+}
+
+
+
+/*
+ * public dimtree operations
+ */
+
+hipac_error
+dimtree_new(struct dimtree **newdt, __u32 origin, const char *chain_name,
+ struct dt_rule *dummy, struct dt_rule *policy)
+{
+ struct dt_chain *chain;
+
+ if (unlikely(newdt == NULL || chain_name == NULL || dummy == NULL ||
+ policy == NULL || dummy->spec.action != TARGET_DUMMY ||
+ !IS_RULE_TERM(policy) || policy->dt_match_len != 0)) {
+ ARG_ERR;
+ }
+ *newdt = hp_alloc(sizeof(**newdt), 1);
+ if (*newdt == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ chain = hp_alloc(sizeof(*chain), 1);
+ if (chain == NULL) {
+ hp_free(*newdt);
+ *newdt = NULL;
+ return HE_LOW_MEMORY;
+ }
+ INIT_LIST_HEAD(&chain->head);
+ strncpy(chain->name, chain_name, sizeof(chain->name));
+ chain->name[sizeof(chain->name) - 1] = '\0';
+ chain->first = policy;
+ chain->len = 2;
+ list_add(&policy->head, &chain->head);
+ list_add(&dummy->head, &chain->head);
+
+ (*newdt)->origin = origin;
+ (*newdt)->top = (struct gen_spec *) policy;
+ (*newdt)->top_new = NULL;
+ (*newdt)->need_commit = 0;
+ (*newdt)->chain = chain;
+ return HE_OK;
+}
+
+void
+dimtree_free(struct dimtree *dt)
+{
+ struct list_head *lh;
+ struct dt_rule *rule;
+
+ if (unlikely(dt == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ if (dt->top != NULL) {
+ if (IS_RLP(dt->top)) {
+ rlp_free_rec((struct rlp_spec *) dt->top, 0, 1);
+ } else if (IS_ELEM(dt->top)) {
+ elem_free((struct dt_elem *) dt->top);
+ }
+ }
+ for (lh = dt->chain->head.next; lh != &dt->chain->head;) {
+ rule = list_entry(lh, struct dt_rule, head);
+ lh = lh->next;
+ if (rule->exec_match != NULL) {
+ ptrblock_free(rule->exec_match);
+ }
+ hp_free(rule);
+ }
+ hp_free(dt->chain);
+ hp_free(dt);
+}
+
+void
+dimtree_flush(struct dimtree *dt)
+{
+ struct gen_spec *top;
+ struct list_head *lh;
+ struct dt_rule *rule;
+
+ if (unlikely(dt == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ assert(dt->chain->len >= 2);
+ assert(list_entry(dt->chain->head.next,
+ struct dt_rule, head)->spec.action == TARGET_DUMMY);
+ top = dt->top;
+ dt->top = (struct gen_spec *) list_entry(dt->chain->head.prev,
+ struct dt_rule, head);
+ ((struct dt_rule *) dt->top)->spec.pos = 1;
+ dt->need_commit = 0;
+ synchronize_rcu();
+ if (top != NULL) {
+ if (IS_RLP(top)) {
+ rlp_free_rec((struct rlp_spec *) top, 0, 1);
+ } else if (IS_ELEM(top)) {
+ elem_free((struct dt_elem *) top);
+ }
+ }
+ for (lh = dt->chain->head.next->next; lh != dt->chain->head.prev;) {
+ rule = list_entry(lh, struct dt_rule, head);
+ lh = lh->next;
+ list_del(lh->prev);
+ if (rule->exec_match != NULL) {
+ ptrblock_free(rule->exec_match);
+ }
+ hp_free(rule);
+ }
+ dt->chain->first = list_entry(dt->chain->head.prev, struct dt_rule,
+ head);
+ dt->chain->len = 2;
+}
+
+const char *
+dimtree_get_chain_name(const struct dimtree *dt)
+{
+ if (unlikely(dt == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ return dt->chain->name;
+}
+
+static hipac_error
+dimtree_insert_intern(struct dimtree *dt, struct dt_rule *rule, __u32 origin,
+ int inc, int insert_chain, int commit)
+{
+ struct gen_spec *top;
+ struct list_head *lh;
+ struct dt_rule *r;
+ int stat;
+
+ if (unlikely(dt == NULL || rule == NULL ||
+ rule->spec.pos <= 0 ||
+ rule->spec.pos >
+ list_entry(dt->chain->head.prev,
+ struct dt_rule, head)->spec.pos ||
+ (IS_TARGET_DUMMY(rule) && !insert_chain))) {
+ ARG_ERR;
+ }
+
+ /* insert rule into dt_chain */
+ assert(!rule->deleted);
+ if (insert_chain) {
+ if (likely(inc)) {
+ for (lh = dt->chain->head.prev; lh != &dt->chain->head;
+ lh = lh->prev) {
+ r = list_entry(lh, struct dt_rule, head);
+ if (r->spec.pos < rule->spec.pos) {
+ break;
+ }
+ r->spec.pos++;
+ }
+ list_add(&rule->head, lh);
+ } else {
+ __u32 maxpos = list_entry(dt->chain->head.prev,
+ struct dt_rule,
+ head)->spec.pos;
+ if (((maxpos + 1) * rule->spec.pos) / dt->chain->len <
+ dt->chain->len >> 1) {
+ list_for_each (lh, &dt->chain->head) {
+ r = list_entry(lh, struct dt_rule,
+ head);
+ if (r->spec.pos > rule->spec.pos) {
+ break;
+ }
+ }
+ list_add_tail(&rule->head, lh);
+ } else {
+ for (lh = dt->chain->head.prev;
+ lh != &dt->chain->head; lh = lh->prev) {
+ r = list_entry(lh, struct dt_rule,
+ head);
+ if (r->spec.pos <= rule->spec.pos) {
+ break;
+ }
+ }
+ list_add(&rule->head, lh);
+ }
+ }
+ dt->chain->len++;
+ if (IS_TARGET_DUMMY(rule)) {
+ return HE_OK;
+ }
+ }
+
+ /* origin check */
+ if (!(dt->origin & origin)) {
+ return HE_RULE_ORIGIN_MISMATCH;
+ }
+
+ if (!dt->need_commit) {
+ /* first operation in a series => clone top level structure
+ if necessary */
+ if (dt->top == NULL) {
+ top = NULL;
+ } else if (IS_RLP(dt->top)) {
+ stat = rlp_clone((struct rlp_spec *) dt->top,
+ (struct rlp_spec **) &top);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace(dt->top, top, !commit);
+ if (stat < 0) {
+ rlp_free((struct rlp_spec *) top);
+ history_undo();
+ return stat;
+ }
+ } else if (IS_ELEM(dt->top)) {
+ stat = elem_clone((struct dt_elem *) dt->top,
+ (struct dt_elem **) &top);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace(dt->top, top, !commit);
+ if (stat < 0) {
+ elem_free((struct dt_elem *) top);
+ history_undo();
+ return stat;
+ }
+ } else {
+ assert(IS_RULE(dt->top));
+ top = dt->top;
+ }
+ } else {
+ top = dt->top_new;
+ }
+
+ /* insert rule into rlp */
+ if (rule->dt_match_len == 0) {
+ /* rule has no native matches at all */
+ if (top != NULL && IS_RLP(top)) {
+ stat = dimtree_insrec_curdimid_eq_tm(
+ (struct rlp_spec **) &top, rule, 0,
+ MAXKEY(dim2btype[((struct rlp_spec *)
+ top)->dimid]), !commit);
+ } else {
+ stat = rule_elem_insert((struct dt_rule_elem_spec **)
+ &top, rule, !commit);
+ }
+ } else {
+ /* rule has at least one native match */
+ if (top == NULL) {
+ stat = dimtree_insrec_null((struct rlp_spec **) &top,
+ rule, 0, !commit);
+ } else if (IS_RLP(top)) {
+ stat = dimtree_insrec((struct rlp_spec **) &top,
+ rule, 0, !commit);
+ } else {
+ /* construct termrule block containing all
+ non TARGET_DUMMY rules except the inserted rule
+ from dt->chain */
+ struct ptrblock *term_prop = NULL;
+ struct list_head *lh;
+ struct dt_rule *r;
+
+ stat = HE_OK;
+ list_for_each (lh, &dt->chain->head) {
+ r = list_entry(lh, struct dt_rule, head);
+ if (r->spec.action == TARGET_DUMMY ||
+ r == rule || r->deleted) {
+ continue;
+ }
+ assert(r->dt_match_len == 0);
+ stat = termrule_insert(&term_prop, r);
+ if (stat < 0) {
+ if (term_prop != NULL) {
+ ptrblock_free(term_prop);
+ }
+ break;
+ }
+ }
+ if (stat == HE_OK) {
+ stat = dimtree_insrec_rule_elem(
+ (struct dt_rule_elem_spec **) &top,
+ rule, 0, term_prop, !commit);
+ }
+ }
+ }
+ if (stat < 0) {
+ history_undo();
+ dt->top_new = NULL;
+ return stat;
+ }
+ if (commit) {
+#ifdef DEBUG
+ if (rule_occur(dt->top, rule, 1)) {
+			DPRINT(DEBUG_DIMTREE, "rule present in original "
+ "structure\n");
+ return HE_IMPOSSIBLE_CONDITION;
+ }
+#endif
+ dt->top = top;
+ dt->top_new = NULL;
+ synchronize_rcu();
+ history_commit(0);
+ assert(history_is_empty());
+ } else {
+ assert((IS_RULE(top) && IS_RULE(dt->top)) ||
+ !history_is_empty());
+ dt->need_commit = 1;
+ dt->top_new = top;
+ }
+ return HE_OK;
+}
+
+#ifdef DEBUG
+void
+dt_rule_print(const struct dt_rule *rule);
+#endif
+
+hipac_error
+dimtree_insert(struct dimtree *dt, struct dt_rule *rule, __u32 origin,
+ int inc, int commit)
+{
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: origin: %X, inc: %d, commit: %d\n",
+ __FUNCTION__, origin, inc, commit);
+ DPRINT(DEBUG_DIMTREE, "dt: origin: %X, need_commit: %u,"
+ " chain: %s (len: %u)\n", dt->origin, dt->need_commit,
+ dt->chain->name, dt->chain->len);
+#ifdef DEBUG
+ if (dt->top_new == NULL) {
+ if (dt->top != NULL) {
+ if (IS_RLP(dt->top)) {
+ print_rlp((struct rlp_spec *) dt->top);
+ } else if (IS_ELEM(dt->top)) {
+ print_elem((struct dt_elem *) dt->top);
+ DPRINT(DEBUG_DIMTREE, "\n");
+ } else {
+ DPRINT(DEBUG_DIMTREE, "top level rule: %p\n",
+ dt->top);
+ }
+ }
+ } else {
+ if (IS_RLP(dt->top_new)) {
+ print_rlp((struct rlp_spec *) dt->top_new);
+ } else if (IS_ELEM(dt->top_new)) {
+ print_elem((struct dt_elem *) dt->top_new);
+ DPRINT(DEBUG_DIMTREE, "\n");
+ } else {
+ DPRINT(DEBUG_DIMTREE, "top level rule: %p\n",
+ dt->top_new);
+ }
+ }
+ if (hipac_debug & DEBUG_DIMTREE) {
+ dt_rule_print(rule);
+ }
+#endif
+ return dimtree_insert_intern(dt, rule, origin, inc, 1, commit);
+}
+
+static struct dt_rule *
+dimtree_delete_find_best_term(struct dimtree *dt,
+ const struct dt_rule *term_rule, __u32 *ntm_num)
+{
+ struct list_head *lh;
+ struct dt_rule *cr;
+
+ if (unlikely(dt == NULL || term_rule == NULL || ntm_num == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+
+ *ntm_num = 0;
+ for (lh = term_rule->head.next; lh != &dt->chain->head;
+ lh = lh->next) {
+ cr = list_entry(lh, struct dt_rule, head);
+ if (cr->deleted) {
+ continue;
+ }
+ if (IS_RULE_TERM(cr)) {
+ return cr;
+ } else if (cr->spec.action != TARGET_DUMMY) {
+ (*ntm_num)++;
+ }
+ }
+ return NULL;
+}
+
+/* from and to are exclusive */
+static hipac_error
+dimtree_delete_insert_ntm(struct dimtree *dt, struct dt_elem **e,
+ const struct dt_rule *from, const struct dt_rule *to)
+{
+ struct list_head *lh;
+ struct dt_rule *cr;
+ int stat;
+
+ if (unlikely(dt == NULL || e == NULL || *e == NULL || to == NULL)) {
+ ARG_ERR;
+ }
+
+ for (lh = (from == NULL ? dt->chain->head.next : from->head.next);
+ lh != &to->head; lh = lh->next) {
+ cr = list_entry(lh, struct dt_rule, head);
+ if (cr->deleted || cr->spec.action == TARGET_DUMMY) {
+ continue;
+ }
+ assert(cr->spec.pos < to->spec.pos);
+ assert(!IS_RULE_TERM(cr));
+ stat = ptrblock_insert_embed((void **) e,
+ offsetof(struct dt_elem,
+ ntm_rules), cr,
+ (*e)->ntm_rules.len);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ return HE_OK;
+}
+
+static hipac_error
+dimtree_delete_rule_elem(struct dt_rule_elem_spec **rule_elem,
+ const struct dt_rule *rule, struct dimtree *dt,
+ int newspec_set)
+{
+ struct dt_elem *e;
+ int stat;
+ __u32 i;
+
+ if (IS_RULE(*rule_elem)) {
+ struct dt_rule *r = (struct dt_rule *) *rule_elem;
+ struct dt_rule *term_rule;
+ __u32 ntm_num;
+
+ if (r != rule) {
+ /* deleted rule must have a higher position than r */
+ return HE_OK;
+ }
+ term_rule = dimtree_delete_find_best_term(dt, rule, &ntm_num);
+ if (term_rule == NULL) {
+ IMPOSSIBLE_CONDITION("attempt to delete the only "
+ "terminal rule");
+ }
+ if (ntm_num == 0) {
+ *rule_elem = (struct dt_rule_elem_spec *) term_rule;
+ return HE_OK;
+ } else {
+ struct dt_elem *e = elem_new_empty(term_rule);
+ if (e == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ stat = dimtree_delete_insert_ntm(dt, &e, rule,
+ term_rule);
+ if (stat < 0) {
+ elem_free(e);
+ return stat;
+ }
+ assert(e->ntm_rules.len > 0);
+ stat = history_new((struct gen_spec *) e, newspec_set);
+ if (stat < 0) {
+ elem_free(e);
+ return stat;
+ }
+ *rule_elem = (struct dt_rule_elem_spec *) e;
+ return HE_OK;
+ }
+ }
+
+ assert(IS_ELEM(*rule_elem));
+ e = (struct dt_elem *) *rule_elem;
+ assert(e->term_rule != NULL);
+ if (IS_RULE_TERM(rule)) {
+ struct dt_rule *term_rule;
+ __u32 ntm_num;
+
+ if (e->term_rule != rule) {
+ /* deleted rule must have a higher position than
+ e->term_rule */
+ assert(rule->spec.pos > e->term_rule->spec.pos);
+ return HE_OK;
+ }
+ term_rule = dimtree_delete_find_best_term(dt, rule, &ntm_num);
+ if (term_rule == NULL) {
+ IMPOSSIBLE_CONDITION("attempt to delete the only "
+ "terminal rule");
+ }
+ stat = dimtree_delete_insert_ntm(
+ dt, (struct dt_elem **) rule_elem, rule, term_rule);
+ if (stat < 0) {
+ /* we only care about rule_elem if its address has
+ changed; otherwise rule_elem is handled by the
+ history */
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *) e);
+ elem_free((struct dt_elem *) *rule_elem);
+ }
+ return stat;
+ }
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *) e);
+ stat = history_new((struct gen_spec *)
+ *rule_elem, newspec_set);
+ if (stat < 0) {
+ elem_free((struct dt_elem *) *rule_elem);
+ return stat;
+ }
+ }
+ (*(struct dt_elem **) rule_elem)->term_rule = term_rule;
+ return HE_OK;
+ } else {
+ for (i = 0; i < e->ntm_rules.len &&
+ ((struct dt_rule *)
+ e->ntm_rules.p[i])->spec.pos <
+ rule->spec.pos; i++);
+ if (i >= e->ntm_rules.len || e->ntm_rules.p[i] != rule) {
+ /* deleted rule must have a higher position than
+ e->ntm_rules.p[e->ntm_rules.len - 1] */
+ return HE_OK;
+ }
+ if (e->ntm_rules.len == 1) {
+ struct dt_rule_elem_spec *tm =
+ (struct dt_rule_elem_spec *)
+ e->term_rule;
+ stat = history_obsolete((struct gen_spec *) e,
+ newspec_set);
+ if (stat < 0) {
+ return stat;
+ }
+ *rule_elem = tm;
+ return HE_OK;
+ } else {
+ stat = ptrblock_delete_pos_embed(
+ (void **) rule_elem,
+ offsetof(struct dt_elem, ntm_rules),
+ i);
+ if (stat < 0) {
+ /* we only care about rule_elem if its address
+ has changed; otherwise rule_elem is
+ handled by the history */
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *)
+ e);
+ elem_free((struct dt_elem *)
+ *rule_elem);
+ }
+ return stat;
+ }
+ if (e != (struct dt_elem *) *rule_elem) {
+ history_del_invalid((struct gen_spec *) e);
+ stat = history_new((struct gen_spec *)
+ *rule_elem, newspec_set);
+ if (stat < 0) {
+ elem_free((struct dt_elem *)
+ *rule_elem);
+ return stat;
+ }
+ }
+ return HE_OK;
+ }
+ }
+}
+
+hipac_error
+dimtree_delete(struct dimtree *dt, struct dt_rule *rule, int commit)
+{
+ struct gen_spec *top;
+ int stat;
+
+ if (unlikely(dt == NULL || rule == NULL || rule->deleted ||
+ rule == list_entry(dt->chain->head.next,
+ struct dt_rule, head) ||
+ rule == list_entry(dt->chain->head.prev,
+ struct dt_rule, head))) {
+ ARG_ERR;
+ }
+
+ assert(dt->top != NULL);
+ DPRINT(DEBUG_DIMTREE,
+ "----------------------------------------------------------\n");
+ DPRINT(DEBUG_DIMTREE, "%s: commit: %d\n", __FUNCTION__, commit);
+ DPRINT(DEBUG_DIMTREE, "dt: origin: %X, need_commit: %u,"
+ " chain: %s (len: %u)\n", dt->origin, dt->need_commit,
+ dt->chain->name, dt->chain->len);
+#ifdef DEBUG
+ if (dt->top_new == NULL) {
+ if (dt->top != NULL) {
+ if (IS_RLP(dt->top)) {
+ print_rlp((struct rlp_spec *) dt->top);
+ } else if (IS_ELEM(dt->top)) {
+ print_elem((struct dt_elem *) dt->top);
+ DPRINT(DEBUG_DIMTREE, "\n");
+ } else {
+ DPRINT(DEBUG_DIMTREE, "top level rule: %p\n",
+ dt->top);
+ }
+ }
+ } else {
+ if (IS_RLP(dt->top_new)) {
+ print_rlp((struct rlp_spec *) dt->top_new);
+ } else if (IS_ELEM(dt->top_new)) {
+ print_elem((struct dt_elem *) dt->top_new);
+ DPRINT(DEBUG_DIMTREE, "\n");
+ } else {
+ DPRINT(DEBUG_DIMTREE, "top level rule: %p\n",
+ dt->top_new);
+ }
+ }
+ if (hipac_debug & DEBUG_DIMTREE) {
+ dt_rule_print(rule);
+ }
+#endif
+
+ if (!dt->need_commit) {
+ /* first operation in a series => clone top level structure
+ if necessary */
+ if (IS_RLP(dt->top)) {
+ stat = rlp_clone((struct rlp_spec *) dt->top,
+ (struct rlp_spec **) &top);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace(dt->top, top, !commit);
+ if (stat < 0) {
+ rlp_free((struct rlp_spec *) top);
+ history_undo();
+ return stat;
+ }
+ } else if (IS_ELEM(dt->top)) {
+ stat = elem_clone((struct dt_elem *) dt->top,
+ (struct dt_elem **) &top);
+ if (stat < 0) {
+ return stat;
+ }
+ stat = history_replace(dt->top, top, !commit);
+ if (stat < 0) {
+ elem_free((struct dt_elem *) top);
+ history_undo();
+ return stat;
+ }
+ } else {
+ assert(IS_RULE(dt->top));
+ top = dt->top;
+ }
+ } else {
+ top = dt->top_new;
+ }
+
+ /* delete rule from rlp / elementary interval */
+ if (IS_RLP(top)) {
+ stat = dimtree_delrec((struct rlp_spec **) &top, rule,
+ 0, NULL, !commit);
+ } else {
+ stat = dimtree_delete_rule_elem((struct dt_rule_elem_spec **)
+ &top, rule, dt, !commit);
+ }
+ if (stat < 0) {
+ history_undo();
+ return stat;
+ }
+
+ if (commit) {
+#ifdef DEBUG
+ if (dt->top != NULL && IS_RLP(dt->top) &&
+ !rule_occur(dt->top, rule, 0)) {
+ /* this check only works if the top level structure is
+ a rlp */
+ DPRINT(DEBUG_DIMTREE, "rule %p not present in "
+ "original rlp\n", rule);
+ return HE_IMPOSSIBLE_CONDITION;
+ }
+#endif
+ dt->top = top;
+ dt->top_new = NULL;
+ synchronize_rcu();
+ history_commit(0);
+ assert(history_is_empty());
+ } else {
+ assert((IS_RULE(top) && IS_RULE(dt->top)) ||
+ !history_is_empty());
+ dt->need_commit = 1;
+ dt->top_new = top;
+ rule->deleted = 1;
+ }
+ return HE_OK;
+}
+
+void
+dimtree_commit(struct ptrblock *dt_block)
+{
+ struct dimtree *dt;
+ __u32 i;
+
+ if (unlikely(dt_block == NULL)) {
+ ARG_MSG;
+ return;
+ }
+
+ for (i = 0; i < dt_block->len; i++) {
+ dt = (struct dimtree *) dt_block->p[i];
+ if (dt->need_commit) {
+ dt->top = dt->top_new;
+ dt->top_new = NULL;
+ dt->need_commit = 0;
+ }
+ }
+ synchronize_rcu();
+ history_commit(1);
+ assert(history_is_empty());
+}
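
dimtree_commit above, together with top, top_new and need_commit, follows a build-aside-and-swap pattern: the new top level structure is prepared off to the side, the visible pointer is switched over, readers are drained via synchronize_rcu, and only afterwards are the old rlps and elementary intervals released (through history_commit in the patch). The stand-alone sketch below shows only that publish-then-free ordering; the *_demo names and the plain malloc/free and print stand-ins are illustrative, not the module's allocator or RCU API.

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for struct dimtree: only the fields involved in the
   commit protocol (illustrative, not the patch's definition) */
struct dimtree_demo {
	void *top;       /* version visible to readers       */
	void *top_new;   /* version under construction       */
	int   need_commit;
};

/* in the kernel this is synchronize_rcu(); here it is only a marker */
static void wait_for_readers_demo(void)
{
	puts("  ... waiting until no reader still uses the old top ...");
}

static void commit_demo(struct dimtree_demo *dt)
{
	void *old = dt->top;

	if (!dt->need_commit)
		return;
	dt->top = dt->top_new;      /* publish the new version          */
	dt->top_new = NULL;
	dt->need_commit = 0;
	wait_for_readers_demo();    /* drain readers of the old version */
	free(old);                  /* only now may the old version go  */
}

int main(void)
{
	struct dimtree_demo dt = { malloc(16), malloc(16), 1 };

	printf("old top %p, new top %p\n", dt.top, dt.top_new);
	commit_demo(&dt);
	printf("published top %p\n", dt.top);
	free(dt.top);
	return 0;
}
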
+
+void
+dimtree_failed(struct ptrblock *dt_block)
+{
+ struct list_head *lh;
+ struct dimtree *dt;
+ __u32 i;
+
+ if (unlikely(dt_block == NULL)) {
+ ARG_MSG;
+ return;
+ }
+
+ for (i = 0; i < dt_block->len; i++) {
+ dt = (struct dimtree *) dt_block->p[i];
+ if (dt->need_commit) {
+ dt->need_commit = 0;
+ dt->top_new = NULL;
+ list_for_each (lh, &dt->chain->head) {
+ list_entry(lh, struct dt_rule,
+ head)->deleted = 0;
+ }
+ }
+ assert(dt->need_commit || dt->top_new == NULL);
+ }
+ history_undo();
+}
+
+void
+dimtree_chain_fix(struct ptrblock *dt_block)
+{
+ struct list_head *lh;
+ struct dt_rule *rule;
+ __u32 i, prevpos_new, prevpos_org;
+ struct dimtree *dt;
+
+ if (unlikely(dt_block == NULL)) {
+ ARG_MSG;
+ return;
+ }
+
+ for (i = 0; i < dt_block->len; i++) {
+ dt = (struct dimtree *) dt_block->p[i];
+ assert(!list_empty(&dt->chain->head));
+ if (dt->chain->first == NULL) {
+ lh = dt->chain->head.next;
+ prevpos_org = list_entry(lh, struct dt_rule,
+ head)->spec.pos;
+ prevpos_new = list_entry(lh, struct dt_rule,
+ head)->spec.pos = 0;
+ lh = lh->next;
+ } else {
+ lh = dt->chain->first->head.next;
+ prevpos_org = prevpos_new = dt->chain->first->spec.pos;
+ }
+ dt->chain->first = list_entry(dt->chain->head.prev,
+ struct dt_rule, head);
+ for (; lh != &dt->chain->head; lh = lh->next) {
+ rule = list_entry(lh, struct dt_rule, head);
+ if (unlikely(rule->spec.pos == prevpos_org)) {
+ rule->spec.pos = prevpos_new;
+ } else {
+ prevpos_org = rule->spec.pos;
+ rule->spec.pos = ++prevpos_new;
+ }
+ }
+ }
+}
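
dimtree_chain_fix above renumbers rule positions so that they become consecutive again while rules that shared one position keep sharing one. The following self-contained sketch reproduces that prevpos_org/prevpos_new bookkeeping over a plain array; the names are invented for illustration and not part of the patch.

#include <stdio.h>

/* tighten a non-decreasing position sequence in place: equal inputs stay
   equal, distinct inputs become consecutive starting at 0 (mirrors the
   prevpos_org/prevpos_new logic of dimtree_chain_fix) */
static void chain_fix_demo(unsigned int *pos, unsigned int len)
{
	unsigned int i, prev_org, prev_new;

	if (len == 0)
		return;
	prev_org = pos[0];
	prev_new = pos[0] = 0;
	for (i = 1; i < len; i++) {
		if (pos[i] == prev_org) {
			pos[i] = prev_new;
		} else {
			prev_org = pos[i];
			pos[i] = ++prev_new;
		}
	}
}

int main(void)
{
	/* positions left behind by deletions: gaps and duplicates */
	unsigned int pos[] = {0, 4, 4, 9, 12, 12, 12, 20};
	unsigned int i;

	chain_fix_demo(pos, sizeof(pos) / sizeof(*pos));
	for (i = 0; i < sizeof(pos) / sizeof(*pos); i++)
		printf("%u ", pos[i]);   /* prints: 0 1 1 2 3 3 3 4 */
	printf("\n");
	return 0;
}
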
+
+static hipac_error
+hipac_get_rlp_stat_rec(struct gen_spec *g, struct hipac_rlp_stat *stat,
+ __u8 depth, __u8 parent_dimid)
+{
+ struct gen_spec **nextspec = NULL;
+ struct rlp_spec *rlp;
+ int ret;
+ __u16 n;
+
+ if (g == NULL) {
+ return HE_OK;
+ }
+ if (IS_RULE(g) || IS_ELEM(g)) {
+ if (depth < 1) {
+ return HE_OK;
+ }
+ stat->termptr_num++;
+ if (parent_dimid >= LEN(stat->termptr_dimid_num)) {
+ IMPOSSIBLE_CONDITION("termptr_dimid_num too small");
+ }
+ stat->termptr_dimid_num[parent_dimid]++;
+ if (depth - 1 >= LEN(stat->termptr_depth_num)) {
+ IMPOSSIBLE_CONDITION("termptr_depth_num too small");
+ }
+ stat->termptr_depth_num[depth - 1]++;
+ if (IS_ELEM(g)) {
+ struct dt_elem *e = (struct dt_elem *) g;
+ __u32 ptr_num;
+ stat->dt_elem_num++;
+ ptr_num = e->ntm_rules.len +
+ (e->term_rule == NULL ? 0 : 1);
+ stat->dt_elem_ptr_num += ptr_num;
+ stat_distribution_add(stat->dt_elem_stat,
+ LEN(stat->dt_elem_stat),
+ ptr_num);
+ }
+ return HE_OK;
+ }
+
+ /* rlp statistics */
+ rlp = (struct rlp_spec *) g;
+ if (hp_size(rlp, &stat->rlp_mem_real, &stat->rlp_mem_tight) < 0) {
+ return HE_IMPOSSIBLE_CONDITION;
+ }
+ if (hp_size(*termrule(rlp), &stat->termrule_mem_real,
+ &stat->termrule_mem_tight) < 0) {
+ return HE_IMPOSSIBLE_CONDITION;
+ }
+ stat->rlp_num++;
+ if (rlp->dimid >= LEN(stat->rlp_dimid_num)) {
+ IMPOSSIBLE_CONDITION("rlp_dimid_num too small");
+ }
+ stat->rlp_dimid_num[rlp->dimid]++;
+ if (depth >= LEN(stat->rlp_depth_num)) {
+ IMPOSSIBLE_CONDITION("rlp_depth_num too small");
+ }
+ stat->rlp_depth_num[depth]++;
+ if (*termrule(rlp) != NULL) {
+ stat->termrule_num++;
+ stat->termrule_ptr_num += (*termrule(rlp))->len;
+ }
+ stat->keys_num += rlp->num;
+ if (rlp->dimid >= LEN(stat->rlp_dimid_keys_stat)) {
+ IMPOSSIBLE_CONDITION("rlp_dimid_keys_stat too small");
+ }
+ stat_distribution_add(stat->rlp_dimid_keys_stat[rlp->dimid],
+ LEN(*stat->rlp_dimid_keys_stat), rlp->num);
+ if (depth > 0) {
+ stat->nontermptr_num++;
+ if (parent_dimid >= LEN(stat->nontermptr_dimid_num)) {
+ IMPOSSIBLE_CONDITION("nontermptr_dimid_num too small");
+ }
+ stat->nontermptr_dimid_num[parent_dimid]++;
+ if (depth - 1 >= LEN(stat->nontermptr_depth_num)) {
+ IMPOSSIBLE_CONDITION("nontermptr_depth_num too small");
+ }
+ stat->nontermptr_depth_num[depth - 1]++;
+ }
+
+ /* recursion */
+ nextspec = rlp_nextspec(rlp);
+ assert(nextspec != NULL);
+
+ for (n = 0; n < rlp->num; n++) {
+ ret = hipac_get_rlp_stat_rec(*(nextspec + n), stat,
+ depth + 1, rlp->dimid);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ if (HAS_WILDCARD_SPEC(rlp)) {
+ ret = hipac_get_rlp_stat_rec(*WILDCARD(rlp), stat,
+ depth + 1, rlp->dimid);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ return HE_OK;
+}
+
+hipac_error
+hipac_get_rlp_stat(void *hipac, struct hipac_rlp_stat *stat)
+{
+ struct dimtree *dt = hipac;
+
+ if (dt == NULL || stat == NULL) {
+ ARG_ERR;
+ }
+
+ memset(stat, 0, sizeof(*stat));
+ stat->total_mem_tight = mem_current_tight;
+ stat->total_mem_real = mem_current_real;
+ if (dt->top == NULL) {
+ IMPOSSIBLE_CONDITION("top level rlp NULL");
+ }
+ return hipac_get_rlp_stat_rec(dt->top, stat, 0, 0);
+}
+
+hipac_error
+hipac_get_dimtree_stat(void *hipac, struct hipac_dimtree_stat *stat)
+{
+ struct dimtree *dt = hipac;
+ struct list_head *lh;
+ struct dt_rule *r;
+ __u32 pos, num;
+
+ if (dt == NULL || stat == NULL) {
+ ARG_ERR;
+ }
+
+ memset(stat, 0, sizeof(*stat));
+ if (hp_size(dt->chain, &stat->chain_mem_real,
+ &stat->chain_mem_tight) < 0) {
+ return HE_IMPOSSIBLE_CONDITION;
+ }
+ stat->rule_num = dt->chain->len;
+ pos = num = 0;
+ list_for_each (lh, &dt->chain->head) {
+ r = list_entry(lh, struct dt_rule, head);
+ if (r->spec.pos == pos) {
+ num++;
+ } else {
+ if (num > 1) {
+ stat_distribution_add(
+ stat->rules_same_pos_stat,
+ LEN(stat->rules_same_pos_stat), num);
+ }
+ num = 1;
+ pos = r->spec.pos;
+ }
+ if (hp_size(r, &stat->chain_mem_real,
+ &stat->chain_mem_tight) < 0) {
+ return HE_IMPOSSIBLE_CONDITION;
+ }
+ if (HAS_EXEC_MATCH(r)) {
+ stat->rules_with_exec_matches++;
+ }
+ if (IS_TARGET_EXEC(r)) {
+ stat->rules_with_exec_target++;
+ }
+ if (r->dt_match_len >= LEN(stat->dt_match_stat)) {
+ IMPOSSIBLE_CONDITION("dt_match_stat too small");
+ }
+ stat->dt_match_stat[r->dt_match_len]++;
+ }
+ if (num > 1) {
+ stat_distribution_add(stat->rules_same_pos_stat,
+ LEN(stat->rules_same_pos_stat), num);
+ }
+ return HE_OK;
+}
+
+
+
+/*
+ * hipac matching algorithm
+ */
+
+
+#ifdef SINGLE_PATH
+
+/* match packet against the rlp in dt and return the terminal action
+ (TARGET_ACCEPT or TARGET_DROP) of the highest priority terminal rule or
+ the policy if there is no such rule */
+hipac_target_t
+hipac_match(void *hipac, const void *packet)
+{
+ struct dt_rule *rule;
+ struct gen_spec *t;
+ __u8 action, i, j;
+ int hotdrop = 0;
+
+ t = ((struct dimtree *) hipac)->top;
+ assert(t != NULL);
+ assert(packet != NULL);
+
+ while (!hotdrop && IS_RLP(t)) {
+ t = ((struct rlp_spec *) t)->locate((struct rlp_spec *) t,
+ packet, &hotdrop);
+ }
+ if (hotdrop)
+ return TARGET_DROP;
+
+ if (likely(IS_RULE(t))) {
+ assert(IS_RULE_TERM((struct dt_rule *) t));
+ return ((struct dt_rule *) t)->spec.action;
+ }
+
+ /* initialization required to prevent compiler warning */
+ action = 0;
+
+ assert(IS_ELEM(t));
+ assert(((struct dt_elem *) t)->term_rule != NULL);
+ assert(IS_RULE_TERM(((struct dt_elem *) t)->term_rule));
+ assert(((struct dt_elem *) t)->ntm_rules.p != NULL);
+ for (i = 0; i < ((struct dt_elem *) t)->ntm_rules.len; i++) {
+ rule = ((struct dt_elem *) t)->ntm_rules.p[i];
+ if (HAS_EXEC_MATCH(rule)) {
+ assert(!(rule->exec_match->len & 1));
+ assert(rule->exec_match->len >= 2);
+ for (j = 0; j < rule->exec_match->len; j += 2) {
+ action = match_fn(packet,
+ rule->exec_match->p[j],
+ rule->exec_match->p[j + 1]);
+ if (action != MATCH_YES) {
+ break;
+ }
+ }
+ if (action == MATCH_NO) {
+ continue;
+ }
+ if (action == MATCH_HOTDROP) {
+ return TARGET_DROP;
+ }
+ }
+ action = IS_TARGET_EXEC(rule) ?
+ target_fn(packet, rule->exec_target)
+ : rule->spec.action;
+ if (action != TARGET_NONE) {
+ assert(action == TARGET_ACCEPT ||
+ action == TARGET_DROP);
+ return action;
+ }
+ }
+
+ /* terminal rule or policy matches */
+ return ((struct dt_elem *) t)->term_rule->spec.action;
+}
+
+# ifdef DEBUG
+
+/*
+ * debugging version of hipac_match (single path)
+ */
+
+/* return the matched rules in order - for verification purposes only */
+struct ptrblock *
+hipac_match_debug(struct dimtree *hipac, const void *packet)
+{
+ struct ptrblock *b = NULL;
+ struct dt_rule *rule;
+ struct gen_spec *t;
+ __u8 action, i, j;
+ int hotdrop = 0;
+
+ t = ((struct dimtree *) hipac)->top;
+ assert(t != NULL);
+ assert(packet != NULL);
+
+ while (!hotdrop && IS_RLP(t)) {
+ t = ((struct rlp_spec *) t)->locate((struct rlp_spec *) t,
+ packet, &hotdrop);
+ }
+ if (hotdrop)
+ return b;
+
+ if (likely(IS_RULE(t))) {
+ assert(IS_RULE_TERM((struct dt_rule *) t));
+ if (ptrblock_append(&b, t) < 0) {
+ ERR("ptrblock_append failed");
+ }
+ return b;
+ }
+
+ /* initialization required to prevent compiler warning */
+ action = 0;
+
+ assert(IS_ELEM(t));
+ assert(((struct dt_elem *) t)->term_rule != NULL);
+ assert(IS_RULE_TERM(((struct dt_elem *) t)->term_rule));
+ assert(((struct dt_elem *) t)->ntm_rules.p != NULL);
+ for (i = 0; i < ((struct dt_elem *) t)->ntm_rules.len; i++) {
+ rule = ((struct dt_elem *) t)->ntm_rules.p[i];
+ if (HAS_EXEC_MATCH(rule)) {
+ assert(!(rule->exec_match->len & 1));
+ assert(rule->exec_match->len >= 2);
+ for (j = 0; j < rule->exec_match->len; j += 2) {
+ action = match_fn(packet,
+ rule->exec_match->p[j],
+ rule->exec_match->p[j + 1]);
+ if (action != MATCH_YES) {
+ break;
+ }
+ }
+ if (action == MATCH_NO) {
+ continue;
+ }
+ if (action == MATCH_HOTDROP) {
+ return b;
+ }
+ }
+ if (ptrblock_append(&b, rule) < 0) {
+ ERR("ptrblock_append failed");
+ return b;
+ }
+ action = IS_TARGET_EXEC(rule) ?
+ target_fn(packet, rule->exec_target)
+ : rule->spec.action;
+		if (action != TARGET_NONE) {
+ assert(action == TARGET_ACCEPT ||
+ action == TARGET_DROP);
+ return b;
+ }
+ }
+
+ /* terminal rule or policy matches */
+ if (ptrblock_append(&b, ((struct dt_elem *) t)->term_rule) < 0) {
+ ERR("ptrblock_append failed");
+ }
+ return b;
+}
+
+# endif // DEBUG
+
+#else // SINGLE_PATH
+
+static inline hipac_target_t
+match_packet(const struct dimtree *dt, const void *packet,
+ struct dt_rule *rule)
+{
+ __u32 i;
+
+ if (HAS_EXEC_MATCH(rule)) {
+ assert(!(rule->exec_match->len & 1));
+ assert(rule->exec_match->len >= 2);
+ for (i = 0; i < rule->exec_match->len; i += 2) {
+ switch (match_fn(packet, rule->exec_match->p[i],
+ rule->exec_match->p[i + 1])) {
+ case MATCH_YES:
+ break;
+
+ case MATCH_NO:
+ return TARGET_NONE;
+
+ case MATCH_HOTDROP:
+ return TARGET_DROP;
+ }
+ }
+ }
+ return IS_TARGET_EXEC(rule) ?
+ target_fn(packet, rule->exec_target) : rule->spec.action;
+}
+
+
+/* match packet against the rlp in dt and return the terminal action
+ (TARGET_ACCEPT or TARGET_DROP) of the highest priority terminal rule or
+ the policy if there is no such rule */
+hipac_target_t
+hipac_match(void *hipac, const void *packet)
+{
+# define NUM_LEAVES 4
+ /* UINT_MAX - 1 is required because of
+ if (likely(term_pos < nonterm_pos)) {...} optimization */
+ __u32 term_pos = UINT_MAX - 1;
+ __u32 nonterm_pos = UINT_MAX;
+ struct dt_rule *term_rule = NULL;
+ struct dt_rule_elem_spec *rule_elem[NUM_LEAVES];
+ struct dt_rule **ntm_rule[NUM_LEAVES];
+ struct dt_rule **ntm_end[NUM_LEAVES];
+ struct gen_spec *t;
+ __u32 ntm_next_pos, new_next;
+ __u8 ntm_rule_sz, ntm_cur_ind;
+ __u8 action, i, len, max;
+ int hotdrop = 0;
+
+ max = 1;
+ i = len = 0;
+ rule_elem[0] = (struct dt_rule_elem_spec *)
+ ((struct dimtree *) hipac)->top;
+ assert(packet != NULL);
+ assert(rule_elem[0] != NULL);
+ assert(!IS_RULE(rule_elem[0]) ||
+ IS_RULE_TERM(((struct dt_rule *) rule_elem[0])));
+ assert(!IS_ELEM(rule_elem[0]) ||
+ (IS_RULE_TERM(((struct dt_elem *) rule_elem[0])->term_rule) &&
+ ((struct dt_elem *) rule_elem[0])->ntm_rules.len > 0));
+
+ do {
+ t = (struct gen_spec *) rule_elem[i++];
+ while (!hotdrop && t && IS_RLP(t)) {
+ t = ((struct rlp_spec *) t)->locate(
+ (struct rlp_spec *) t, packet, &hotdrop,
+ (struct gen_spec **) rule_elem, &max);
+		}
+ if (hotdrop)
+ return TARGET_DROP;
+ assert(max <= NUM_LEAVES);
+ if (unlikely(t == NULL)) {
+ continue;
+ }
+ rule_elem[len++] = (struct dt_rule_elem_spec *) t;
+ if (likely(IS_RULE(t))) {
+ if (likely(IS_RULE_TERM((struct dt_rule *) t))) {
+ if (((struct dt_rule *) t)->spec.pos <
+ term_pos) {
+ term_rule = (struct dt_rule *) t;
+ term_pos = term_rule->spec.pos;
+ }
+ } else if (((struct dt_rule *) t)->spec.pos <
+ nonterm_pos) {
+ nonterm_pos = ((struct dt_rule *)
+ t)->spec.pos;
+ }
+ } else {
+ if (((struct dt_elem *) t)->term_rule != NULL &&
+ ((struct dt_elem *) t)->term_rule->spec.pos <
+ term_pos) {
+ term_rule = ((struct dt_elem *)
+ t)->term_rule;
+ term_pos = term_rule->spec.pos;
+ assert(IS_RULE_TERM(term_rule));
+ }
+ assert(((struct dt_elem *) t)->ntm_rules.len > 0);
+ if (((struct dt_rule *)
+ ((struct dt_elem *) t)->ntm_rules.p[0])->spec.pos
+ < nonterm_pos) {
+ nonterm_pos = ((struct dt_rule *)
+ ((struct dt_elem *)
+ t)->ntm_rules.p[0])->spec.pos;
+ }
+ }
+ } while (i < max);
+
+ /* optimization for the ideal case that no non-terminal rules
+ (function based matches or no terminal target) exist */
+ if (likely(term_pos < nonterm_pos)) {
+ assert(term_rule != NULL);
+ action = term_rule->spec.action;
+ return action;
+ }
+
+ /* initialize ntm_rule, ntm_end, ntm_rule_sz, ntm_cur_ind and
+ ntm_next_pos now that term_pos is given */
+ ntm_rule_sz = ntm_cur_ind = 0;
+ ntm_next_pos = UINT_MAX;
+ for (i = 0; i < len; i++) {
+ assert(rule_elem[i] != NULL);
+ if (likely(IS_RULE(rule_elem[i]))) {
+ struct dt_rule **r = (struct dt_rule **) &rule_elem[i];
+ __u32 pos = (*r)->spec.pos;
+ if (!IS_RULE_TERM(*r) && pos < term_pos) {
+ if (pos == nonterm_pos) {
+ ntm_cur_ind = ntm_rule_sz;
+ } else if (pos < ntm_next_pos) {
+ ntm_next_pos = pos;
+ }
+ ntm_rule[ntm_rule_sz] = r;
+ ntm_end[ntm_rule_sz++] = r;
+ }
+ } else {
+ struct dt_elem *e = (struct dt_elem *) rule_elem[i];
+ __u32 pos = ((struct dt_rule *)
+ *e->ntm_rules.p)->spec.pos;
+ if (pos < term_pos) {
+ if (pos == nonterm_pos) {
+ ntm_cur_ind = ntm_rule_sz;
+ } else if (pos < ntm_next_pos) {
+ ntm_next_pos = pos;
+ }
+ ntm_rule[ntm_rule_sz] =
+ (struct dt_rule **) e->ntm_rules.p;
+ ntm_end[ntm_rule_sz++] = (struct dt_rule **)
+ &e->ntm_rules.p[e->ntm_rules.len - 1];
+ }
+ }
+ }
+ assert(ntm_rule_sz > 0);
+
+ /* process non-terminal rules in order up to term_pos */
+ ntm_next_pos = ntm_next_pos < term_pos ? ntm_next_pos : term_pos;
+ while (ntm_rule_sz > 0 &&
+ (*ntm_rule[ntm_cur_ind])->spec.pos < ntm_next_pos) {
+
+ /* match packet against current block of rules */
+ for (; (ntm_rule[ntm_cur_ind] <= ntm_end[ntm_cur_ind] &&
+ (*ntm_rule[ntm_cur_ind])->spec.pos < ntm_next_pos);
+ ntm_rule[ntm_cur_ind]++) {
+
+ switch (action =
+ match_packet((struct dimtree *) hipac, packet,
+ *ntm_rule[ntm_cur_ind])) {
+
+ case TARGET_NONE:
+ break;
+ default:
+ assert(action == TARGET_ACCEPT ||
+ action == TARGET_DROP);
+ return action;
+ }
+ }
+
+ /* remove current block of rules if no rule is left that may
+ be matched */
+ if (ntm_rule[ntm_cur_ind] > ntm_end[ntm_cur_ind] ||
+ (*ntm_rule[ntm_cur_ind])->spec.pos >= term_pos) {
+ ntm_rule_sz--;
+ assert(ntm_cur_ind <= ntm_rule_sz);
+ ntm_rule[ntm_cur_ind] = ntm_rule[ntm_rule_sz];
+ ntm_end[ntm_cur_ind] = ntm_end[ntm_rule_sz];
+ }
+
+ /* set ntm_cur_ind and ntm_next_pos for next run */
+ new_next = term_pos;
+ for (i = 0; i < ntm_rule_sz; i++) {
+ if ((*ntm_rule[i])->spec.pos == ntm_next_pos) {
+ ntm_cur_ind = i;
+ } else if ((*ntm_rule[i])->spec.pos < new_next) {
+ new_next = (*ntm_rule[i])->spec.pos;
+ }
+ }
+ ntm_next_pos = new_next;
+ }
+
+ /* terminal rule or policy matches */
+ assert(term_rule != NULL);
+ action = term_rule->spec.action;
+ return action;
+}
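
The second half of hipac_match above effectively merges up to NUM_LEAVES blocks of non-terminal rules, each already sorted by position, and evaluates them in global position order until the position of the best terminal rule is reached. The sketch below shows only that merge-by-position idea on two plain arrays; the real code additionally compacts exhausted blocks and caches ntm_cur_ind/ntm_next_pos, and all *_demo names are invented for illustration.

#include <stdio.h>

/* one block of non-terminal rule positions, sorted ascending
   (stands in for the ntm_rule/ntm_end pointer pairs in the patch) */
struct ntm_block_demo {
	const unsigned int *cur;
	const unsigned int *end;   /* inclusive */
};

/* visit all positions smaller than term_pos in ascending order */
static void process_by_position_demo(struct ntm_block_demo *blk,
				     unsigned int nblk, unsigned int term_pos)
{
	for (;;) {
		unsigned int best = term_pos, best_i = 0, i;

		for (i = 0; i < nblk; i++) {
			if (blk[i].cur <= blk[i].end && *blk[i].cur < best) {
				best = *blk[i].cur;
				best_i = i;
			}
		}
		if (best == term_pos)
			break;                  /* nothing left below term_pos */
		printf("non-terminal rule at position %u\n", best);
		blk[best_i].cur++;              /* advance the winning block   */
	}
	printf("terminal rule at position %u wins\n", term_pos);
}

int main(void)
{
	static const unsigned int a[] = {2, 5, 11};
	static const unsigned int b[] = {3, 7};
	struct ntm_block_demo blk[2] = {
		{ a, a + 2 },   /* block a: positions 2, 5, 11 */
		{ b, b + 1 },   /* block b: positions 3, 7     */
	};

	process_by_position_demo(blk, 2, 9);
	return 0;
}
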
+
+# ifdef DEBUG
+
+/*
+ * debugging version of hipac_match (multi path)
+ */
+
+/* for verification purposes only */
+static inline hipac_target_t
+match_packet_debug(struct ptrblock **b, const struct dimtree *dt,
+ const void *packet, struct dt_rule *rule)
+{
+ __u32 i;
+
+ if (HAS_EXEC_MATCH(rule)) {
+ assert(!(rule->exec_match->len & 1));
+ assert(rule->exec_match->len >= 2);
+ for (i = 0; i < rule->exec_match->len; i += 2) {
+ switch (match_fn(packet, rule->exec_match->p[i],
+ rule->exec_match->p[i + 1])) {
+ case MATCH_YES:
+ break;
+
+ case MATCH_NO:
+ return TARGET_NONE;
+
+ case MATCH_HOTDROP:
+ return TARGET_DROP;
+ }
+ }
+ }
+ if (ptrblock_append(b, rule) < 0) {
+ ERR("ptrblock_append failed");
+ }
+ return IS_TARGET_EXEC(rule) ?
+ target_fn(packet, rule->exec_target) : rule->spec.action;
+}
+
+/* return the matched rules in order - for verification purposes only */
+struct ptrblock *
+hipac_match_debug(struct dimtree *hipac, const void *packet)
+{
+# define NUM_LEAVES 4
+ struct ptrblock *b = NULL;
+ /* UINT_MAX - 1 is required because of
+ if (likely(term_pos < nonterm_pos)) {...} optimization */
+ __u32 term_pos = UINT_MAX - 1;
+ __u32 nonterm_pos = UINT_MAX;
+ struct dt_rule *term_rule = NULL;
+ struct dt_rule_elem_spec *rule_elem[NUM_LEAVES];
+ struct dt_rule **ntm_rule[NUM_LEAVES];
+ struct dt_rule **ntm_end[NUM_LEAVES];
+ struct gen_spec *t;
+ __u32 ntm_next_pos, new_next;
+ __u8 ntm_rule_sz, ntm_cur_ind;
+ __u8 action, i, len, max;
+ int hotdrop = 0;
+
+ max = 1;
+ i = len = 0;
+ rule_elem[0] = (struct dt_rule_elem_spec *)
+ ((struct dimtree *) hipac)->top;
+ assert(packet != NULL);
+ assert(rule_elem[0] != NULL);
+ assert(!IS_RULE(rule_elem[0]) ||
+ IS_RULE_TERM(((struct dt_rule *) rule_elem[0])));
+ assert(!IS_ELEM(rule_elem[0]) ||
+ (IS_RULE_TERM(((struct dt_elem *) rule_elem[0])->term_rule) &&
+ ((struct dt_elem *) rule_elem[0])->ntm_rules.len > 0));
+
+ do {
+ t = (struct gen_spec *) rule_elem[i++];
+ while (!hotdrop && t && IS_RLP(t)) {
+ t = ((struct rlp_spec *) t)->locate(
+ (struct rlp_spec *) t, packet, &hotdrop,
+ (struct gen_spec **) rule_elem, &max);
+		}
+ if (hotdrop)
+ return b;
+ assert(max <= NUM_LEAVES);
+ if (unlikely(t == NULL)) {
+ continue;
+ }
+ rule_elem[len++] = (struct dt_rule_elem_spec *) t;
+ if (likely(IS_RULE(t))) {
+ if (likely(IS_RULE_TERM((struct dt_rule *) t))) {
+ if (((struct dt_rule *) t)->spec.pos <
+ term_pos) {
+ term_rule = (struct dt_rule *) t;
+ term_pos = term_rule->spec.pos;
+ }
+ } else if (((struct dt_rule *) t)->spec.pos <
+ nonterm_pos) {
+ nonterm_pos = ((struct dt_rule *)
+ t)->spec.pos;
+ }
+ } else {
+ if (((struct dt_elem *) t)->term_rule != NULL &&
+ ((struct dt_elem *) t)->term_rule->spec.pos <
+ term_pos) {
+ term_rule = ((struct dt_elem *)
+ t)->term_rule;
+ term_pos = term_rule->spec.pos;
+ assert(IS_RULE_TERM(term_rule));
+ }
+ assert(((struct dt_elem *) t)->ntm_rules.len > 0);
+ if (((struct dt_rule *)
+ ((struct dt_elem *) t)->ntm_rules.p[0])->spec.pos
+ < nonterm_pos) {
+ nonterm_pos = ((struct dt_rule *)
+ ((struct dt_elem *)
+ t)->ntm_rules.p[0])->spec.pos;
+ }
+ }
+ } while (i < max);
+
+ /* optimization for the ideal case that no non-terminal rules
+ (function based matches or no terminal target) exist */
+ if (likely(term_pos < nonterm_pos)) {
+ assert(term_rule != NULL);
+ if (ptrblock_append(&b, term_rule) < 0) {
+ ERR("ptrblock_append failed");
+ }
+ return b;
+ }
+
+ /* initialize ntm_rule, ntm_end, ntm_rule_sz, ntm_cur_ind and
+ ntm_next_pos now that term_pos is given */
+ ntm_rule_sz = ntm_cur_ind = 0;
+ ntm_next_pos = UINT_MAX;
+ for (i = 0; i < len; i++) {
+ assert(rule_elem[i] != NULL);
+ if (likely(IS_RULE(rule_elem[i]))) {
+ struct dt_rule **r = (struct dt_rule **) &rule_elem[i];
+ __u32 pos = (*r)->spec.pos;
+ if (!IS_RULE_TERM(*r) && pos < term_pos) {
+ if (pos == nonterm_pos) {
+ ntm_cur_ind = ntm_rule_sz;
+ } else if (pos < ntm_next_pos) {
+ ntm_next_pos = pos;
+ }
+ ntm_rule[ntm_rule_sz] = r;
+ ntm_end[ntm_rule_sz++] = r;
+ }
+ } else {
+ struct dt_elem *e = (struct dt_elem *) rule_elem[i];
+ __u32 pos = ((struct dt_rule *)
+ *e->ntm_rules.p)->spec.pos;
+ if (pos < term_pos) {
+ if (pos == nonterm_pos) {
+ ntm_cur_ind = ntm_rule_sz;
+ } else if (pos < ntm_next_pos) {
+ ntm_next_pos = pos;
+ }
+ ntm_rule[ntm_rule_sz] =
+ (struct dt_rule **) e->ntm_rules.p;
+ ntm_end[ntm_rule_sz++] = (struct dt_rule **)
+ &e->ntm_rules.p[e->ntm_rules.len - 1];
+ }
+ }
+ }
+ assert(ntm_rule_sz > 0);
+
+ /* process non-terminal rules in order up to term_pos */
+ ntm_next_pos = ntm_next_pos < term_pos ? ntm_next_pos : term_pos;
+ while (ntm_rule_sz > 0 &&
+ (*ntm_rule[ntm_cur_ind])->spec.pos < ntm_next_pos) {
+
+ /* match packet against current block of rules */
+ for (; (ntm_rule[ntm_cur_ind] <= ntm_end[ntm_cur_ind] &&
+ (*ntm_rule[ntm_cur_ind])->spec.pos < ntm_next_pos);
+ ntm_rule[ntm_cur_ind]++) {
+
+ switch (action =
+ match_packet_debug(&b,
+ (struct dimtree *) hipac,
+ packet,
+ *ntm_rule[ntm_cur_ind])) {
+
+ case TARGET_NONE:
+ break;
+ default:
+ assert(action == TARGET_ACCEPT ||
+ action == TARGET_DROP);
+ return b;
+ }
+ }
+
+ /* remove current block of rules if no rule is left that may
+ be matched */
+ if (ntm_rule[ntm_cur_ind] > ntm_end[ntm_cur_ind] ||
+ (*ntm_rule[ntm_cur_ind])->spec.pos >= term_pos) {
+ ntm_rule_sz--;
+ assert(ntm_cur_ind <= ntm_rule_sz);
+ ntm_rule[ntm_cur_ind] = ntm_rule[ntm_rule_sz];
+ ntm_end[ntm_cur_ind] = ntm_end[ntm_rule_sz];
+ }
+
+ /* set ntm_cur_ind and ntm_next_pos for next run */
+ new_next = term_pos;
+ for (i = 0; i < ntm_rule_sz; i++) {
+ if ((*ntm_rule[i])->spec.pos == ntm_next_pos) {
+ ntm_cur_ind = i;
+ } else if ((*ntm_rule[i])->spec.pos < new_next) {
+ new_next = (*ntm_rule[i])->spec.pos;
+ }
+ }
+ ntm_next_pos = new_next;
+ }
+
+ /* terminal rule or policy matches */
+ assert(term_rule != NULL);
+ if (ptrblock_append(&b, term_rule) < 0) {
+ ERR("ptrblock_append failed");
+ }
+ return b;
+}
+
+# endif // DEBUG
+
+#endif // SINGLE_PATH
diff -urN nf-hipac/kernel/dimtree.h nfhipac/kernel/dimtree.h
--- nf-hipac/kernel/dimtree.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/dimtree.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,280 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _DIMTREE_H
+#define _DIMTREE_H
+
+#include "global.h"
+#include "rlp.h"
+
+/* upper bound for matches of the given bit type */
+#define MAXKEY(bittype) \
+((bittype) == BIT_U16 ? 0xffff : 0xffffffff)
+
+/* used to distinguish a rule from an elementary interval */
+#define RT_RULE 0
+#define RT_ELEM 1
+
+
+/* header for dimtree rules and elementary intervals */
+struct dt_rule_elem_spec
+{
+ unsigned rlp : 1; // must be 0
+ unsigned rtype : 1; // {RT_RULE, RT_ELEM}
+};
+
+/* header for dimtree rules */
+struct dt_rule_spec
+{
+ unsigned rlp : 1; // must be 0
+ unsigned rtype : 1; // must be RT_RULE
+ unsigned action : 4; // packet action
+ unsigned pos : 26; // position of the rule in the chain
+};
+
+/* dt_match represents the native interval match [left, right] associated
+   with dimension dimid; [left, right] must not be a wildcard match */
+struct dt_match
+{
+ __u8 dimid;
+ __u32 left, right;
+ char next_match[0];
+};
+
+/* dt_rule is an entry in the dt_chain; at the end of the struct we have
+ dt_match_len >= 0 dt_matches
+ if the rule has a function based target then exec_target points to the
+ target's data which is handled by target_fn;
+   the rule's exec_match pointer block references >= 0 blocks ("fblocks"),
+   each consisting of >= 1 function based matches;
+ the (2 * i)-th pointer of exec_match points to the beginning of the i-th
+ fblock;
+ the (2 * i + 1)-th pointer of exec_match points to the end of the i-th
+ fblock;
+ the start and end pointers are handed to match_fn */
+struct dt_rule
+{
+ struct dt_rule_spec spec;
+ struct list_head head;
+ struct ptrblock *exec_match;
+ void *exec_target;
+ __u32 exec_target_size;
+ __u8 deleted;
+ __u8 dt_match_len;
+ struct dt_match first_dt_match[0];
+};
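
As the comment above describes, the i-th fblock of a rule is stored as the pointer pair (exec_match->p[2 * i], exec_match->p[2 * i + 1]), and hipac_match hands each pair to match_fn. The stand-alone sketch below walks such start/end pairs with a trivial pointer block and a dummy match function; every demo_* name is invented and not part of the patch.

#include <stdio.h>

#define DEMO_MATCH_YES 0
#define DEMO_MATCH_NO  1

/* trivial stand-in for struct ptrblock */
struct demo_ptrblock {
	unsigned int len;
	void *p[8];
};

/* stand-in for match_fn(packet, fblock_start, fblock_end) */
static int demo_match_fn(const void *packet, void *start, void *end)
{
	/* a real fblock would be interpreted between start and end;
	   here every block simply "matches" */
	(void) packet; (void) start; (void) end;
	return DEMO_MATCH_YES;
}

/* returns DEMO_MATCH_YES only if every fblock of the rule matches,
   mirroring the j += 2 loop in hipac_match */
static int demo_eval_exec_match(const struct demo_ptrblock *exec_match,
				const void *packet)
{
	unsigned int j;

	for (j = 0; j + 1 < exec_match->len; j += 2) {
		int res = demo_match_fn(packet, exec_match->p[j],
					exec_match->p[j + 1]);
		if (res != DEMO_MATCH_YES)
			return res;
	}
	return DEMO_MATCH_YES;
}

int main(void)
{
	char blob[4];
	struct demo_ptrblock m = { 4, { blob, blob + 1, blob + 2, blob + 3 } };

	printf("exec matches: %s\n",
	       demo_eval_exec_match(&m, "pkt") == DEMO_MATCH_YES ?
	       "all fblocks matched" : "some fblock failed");
	return 0;
}
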
+
+#define IS_RULE(r) (!IS_RLP(r) && \
+ ((struct dt_rule_elem_spec *) (r))->rtype == RT_RULE)
+#define HAS_EXEC_MATCH(r) ((r)->exec_match != NULL)
+#define IS_TARGET_DUMMY(r) ((r)->spec.action == TARGET_DUMMY)
+#define IS_TARGET_NONE(r) ((r)->spec.action == TARGET_NONE)
+#define IS_TARGET_EXEC(r) ((r)->spec.action == TARGET_EXEC)
+#define IS_TARGET_TERM(r) ((r)->spec.action == TARGET_ACCEPT || \
+ (r)->spec.action == TARGET_DROP)
+#define IS_RULE_TERM(r) (IS_TARGET_TERM(r) && !HAS_EXEC_MATCH(r))
+
+/* return the size of a dt_rule with dt_match_len dt_matches */
+static inline __u32
+dt_rule_size(__u8 dt_match_len)
+{
+ return (sizeof(struct dt_rule) +
+ dt_match_len * sizeof(struct dt_match));
+}
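
dt_rule ends in a flexible array of dt_matches, so a rule carrying n native matches is allocated as one chunk of dt_rule_size(n) bytes and the i-th match is reached through first_dt_match (the ITH_DT_MATCH macro in dimtree.c). A user-space sketch of that allocation pattern, with plain malloc standing in for the module's allocator and invented demo_* names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_match {
	unsigned char dimid;
	unsigned int left, right;
};

struct demo_rule {
	unsigned char match_len;
	struct demo_match first_match[];   /* flexible array member */
};

/* allocate header and matches in one chunk, like dt_rule_size() suggests */
static struct demo_rule *demo_rule_new(unsigned char match_len)
{
	size_t sz = sizeof(struct demo_rule) +
		    match_len * sizeof(struct demo_match);
	struct demo_rule *r = malloc(sz);

	if (r == NULL)
		return NULL;
	memset(r, 0, sz);
	r->match_len = match_len;
	return r;
}

int main(void)
{
	struct demo_rule *r = demo_rule_new(3);

	if (r == NULL)
		return 1;
	r->first_match[2].dimid = 1;   /* i-th match: first_match[i] */
	printf("rule with %u matches allocated in one chunk\n", r->match_len);
	free(r);
	return 0;
}
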
+
+/* head of the list of rules */
+struct dt_chain
+{
+ struct list_head head;
+ char name[HIPAC_CHAIN_NAME_MAX_LEN];
+ struct dt_rule *first; // optimization of dimtree_chain_fix
+ __u32 len;
+};
+
+
+
+/* header for elementary intervals */
+struct dt_elem_spec
+{
+ unsigned rlp : 1; // must be 0
+ unsigned rtype : 1; // must be RT_ELEM
+ unsigned newspec : 1; // indicates whether the elementary interval is
+ // contained in newspec
+};
+
+/* elementary interval */
+struct dt_elem
+{
+ struct dt_elem_spec spec;
+ /* terminating target (TARGET_ACCEPT, TARGET_DROP) without function
+ based matches */
+ struct dt_rule *term_rule;
+ /* block of non-terminating rules (function based matches or no
+ terminal target) whose position is < term_rule->spec.pos */
+ struct ptrblock ntm_rules;
+};
+
+#define IS_ELEM(e) (!IS_RLP(e) && \
+ ((struct dt_rule_elem_spec *) (e))->rtype == RT_ELEM)
+
+
+
+struct dimtree
+{
+ __u32 origin;
+ struct gen_spec *top;
+ struct gen_spec *top_new; // new not yet active top level structure
+ int need_commit; // 1 if top_new is valid
+ struct dt_chain *chain;
+};
+
+
+
+/* create new dimtree and store it in *newdt; chain_name is copied to
+ dt->chain->name; memory for newdt is allocated within dimtree_new;
+ origin is a bit vector where exactly one bit is set; it is used to
+ uniquely define the "origin property" of newdt; dummy and policy
+ define the base ruleset; dummy must have TARGET_DUMMY as target,
+ policy must be a terminal rule without any dt_matches;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+dimtree_new(struct dimtree **newdt, __u32 origin, const char *chain_name,
+ struct dt_rule *dummy, struct dt_rule *policy);
+
+/* free memory for dt and all embedded structures; make sure that no packet
+ matching occurs on dt any more */
+void
+dimtree_free(struct dimtree *dt);
+
+/* remove all rules except the first and the last one from dt->chain and
+ free them; set dt->top to the last rule in the chain */
+void
+dimtree_flush(struct dimtree *dt);
+
+const char *
+dimtree_get_chain_name(const struct dimtree *dt);
+
+/* insert rule into the dt_chain and the rlps; inc indicates whether all
+ rule positions >= rule->spec.pos should be incremented by 1;
+ if commit is not 0 then the top level structure in dt is replaced by the
+ new one and the old rlps and elementary intervals are freed;
+ in case of a fault all newly created rlps and elementary intervals
+ are freed; origin is a bit vector describing the allowed dimtrees
+ into which rule may be inserted; if rule must not be inserted into dt
+ it is nevertheless inserted into dt->chain (so take care to remove it from
+ there);
+ NOTICE: if commit is not 0 it is assumed that this operation is the
+ first one (at all or directly after a previously committed
+ operation or series of operations (-> dimtree_commit))
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION,
+ HE_RULE_ORIGIN_MISMATCH */
+hipac_error
+dimtree_insert(struct dimtree *dt, struct dt_rule *rule, __u32 origin,
+ int inc, int commit);
+
+/* delete rule from rlp, _NOT_ from the dt_chain; 'rule' must point to a
+ rule in dt->chain; if commit is not 0 then the top level structure in dt
+ is replaced by the new one and the old rlps and elementary intervals
+ are freed; in case of a fault all newly created rlps and elementary
+ intervals are freed;
+ NOTICE: if commit is not 0 it is assumed that this operation is the
+ first one (at all or directly after a previously committed
+ operation or series of operations (-> dimtree_commit))
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+dimtree_delete(struct dimtree *dt, struct dt_rule *rule, int commit);
+
+/* called at the end of a successful series of dimtree_insert and/or
+ dimtree_delete operation(s) to make the result visible, i.e. set dt->top
+ to dt->top_new for each dimtree dt in dt_block and free the old rlps
+ and elementary intervals */
+void
+dimtree_commit(struct ptrblock *dt_block);
+
+/* called at the end of an unsuccessful series of dimtree_insert and/or
+ dimtree_delete operation(s) to undo the changes, i.e. set dt->top_new
+ to NULL and need_commit to 0 for each dimtree dt in dt_block and free the
+ new rlps and elementary intervals */
+void
+dimtree_failed(struct ptrblock *dt_block);
+
+#ifdef DEBUG
+int
+rule_occur(struct gen_spec *g, struct dt_rule *rule, int print);
+#endif
+
+/* remove all rules between start and the rule(s) r with position end_pos,
+ including start and r themselves; the positions of the rules behind r are
+ not changed */
+static inline void
+dimtree_chain_delete(struct dimtree *dt, struct dt_rule *start, __u32 end_pos)
+{
+ struct dt_rule *rule;
+ struct list_head *lh;
+
+ if (unlikely(dt == NULL || start == NULL ||
+ start->spec.pos > end_pos)) {
+ ARG_MSG;
+ return;
+ }
+
+ assert(dt->need_commit == 0);
+ if (start->head.prev == &dt->chain->head) {
+ /* start is the first element => dt->chain->first stays
+ NULL until dimtree_chain_fix has been called */
+ dt->chain->first = NULL;
+ } else if (dt->chain->first != NULL &&
+ dt->chain->first->spec.pos >= start->spec.pos) {
+ dt->chain->first = list_entry(start->head.prev,
+ struct dt_rule, head);
+ }
+ for (lh = &start->head, rule = start; lh != &dt->chain->head &&
+ rule->spec.pos <= end_pos;) {
+ lh = lh->next;
+ list_del(lh->prev);
+#ifdef DEBUG
+ if (rule_occur(dt->top, rule, 1)) {
+ ERR("rule present in original structure");
+ return;
+ }
+#endif
+ if (rule->exec_match != NULL) {
+ ptrblock_free(rule->exec_match);
+ }
+ hp_free(rule);
+ dt->chain->len--;
+ rule = list_entry(lh, struct dt_rule, head);
+ }
+}
+
+/* iterate over the dt_chain of each dimtree in dt_block and tighten the
+ position numbers */
+void
+dimtree_chain_fix(struct ptrblock *dt_block);
+
+#ifdef DEBUG
+/* matching algorithm used for correctness checks; the returned ptrblock
+ contains the rules matching the packet, ordered by their positions;
+ the last rule should always have TARGET_ACCEPT or TARGET_DROP as action
+ and may not contain exec_matches */
+struct ptrblock *
+hipac_match_debug(struct dimtree *dt, const void *packet);
+#endif
+
+#endif
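
The header above already documents the intended call order of the dimtree interface (dimtree_new with a dummy and a policy rule, a series of dimtree_insert/dimtree_delete calls, then dimtree_commit or dimtree_failed on a ptrblock of the affected dimtrees). As a rough orientation, here is a minimal usage sketch; it is illustrative only and not part of the patch. The function name dimtree_usage_sketch, the chain name "INPUT", the origin bit 0x1 and the assumption that dummy, policy and rule have been built elsewhere are all made up for the example.

static hipac_error
dimtree_usage_sketch(struct dt_rule *dummy, struct dt_rule *policy,
                     struct dt_rule *rule)
{
        struct dimtree *dt = NULL;
        struct ptrblock *dts = NULL;
        hipac_error err;

        /* origin is a bit vector with exactly one bit set */
        err = dimtree_new(&dt, 0x1, "INPUT", dummy, policy);
        if (err != HE_OK)
                return err;

        /* first operation of a series; commit == 0 keeps dt->top_new pending */
        err = dimtree_insert(dt, rule, 0x1, 1, 0);
        if (err != HE_OK) {
                dimtree_free(dt);        /* error handling simplified */
                return err;
        }

        /* make the whole series visible; dimtree_failed() would roll it back */
        err = ptrblock_append(&dts, dt);
        if (err != HE_OK) {
                dimtree_free(dt);
                return err;
        }
        dimtree_commit(dts);
        ptrblock_free(dts);
        return HE_OK;
}
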
diff -urN nf-hipac/kernel/global.c nfhipac/kernel/global.c
--- nf-hipac/kernel/global.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/global.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,964 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include "global.h"
+#include "ihash.h"
+
+#define LEN(array) (sizeof(array) / sizeof(*(array)))
+
+
+__u64 mem_max = 0;
+__u64 mem_current_tight = 0;
+__u64 mem_current_real = 0;
+static struct ihash *memhash = NULL;
+
+
+void *
+hp_alloc(__u32 size, int do_add)
+{
+ __u32 sz_real;
+ void *p;
+
+ if (unlikely(size == 0 || size >= 0x80000000)) {
+ ARG_MSG;
+ return NULL;
+ }
+ if (unlikely(memhash == NULL)) {
+ memhash = ihash_new(INITIAL_MEMHASH_LEN, 1,
+ MEMHASH_AVRG_ELEM_PER_BUCKET,
+ ihash_func_val, eq_val);
+ if (memhash == NULL) {
+ ERR("unable to create memhash");
+ return NULL;
+ }
+ }
+ if (size <= PAGE_SIZE) {
+ sz_real = mini_alloc_size(size);
+ if (unlikely(do_add && mem_current_real + sz_real > mem_max)) {
+ goto mem_max_reached;
+ }
+ p = mini_alloc(size);
+ } else {
+ sz_real = big_alloc_size(size);
+ if (unlikely(do_add && mem_current_real + sz_real > mem_max)) {
+ goto mem_max_reached;
+ }
+ p = big_alloc(size);
+ }
+ if (p == NULL) {
+ return NULL;
+ }
+ if (ihash_insert(&memhash, p,
+ val_to_ptr(((!!do_add) << 31) | size)) < 0) {
+ if (size <= PAGE_SIZE) {
+ mini_free(p);
+ } else {
+ big_free(p);
+ }
+ return NULL;
+ }
+ if (do_add) {
+ mem_current_tight += size;
+ mem_current_real += sz_real;
+ }
+ return p;
+
+ mem_max_reached:
+ return NULL;
+}
+
+void
+hp_free(void *p)
+{
+ __u32 size, sz_real, do_add;
+ void *inf;
+
+ if (unlikely(p == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ if (unlikely(memhash == NULL)) {
+ ERR("hp_free called before hp_alloc");
+ return;
+ }
+ inf = ihash_lookup_val(memhash, p);
+ if (unlikely(inf == NULL)) {
+ ERR("pointer %p not in memhash", p);
+ return;
+ }
+ size = ptr_to_val(inf);
+ do_add = size & 0x80000000;
+ size &= 0x7FFFFFFF;
+ if (size <= PAGE_SIZE) {
+ mini_free(p);
+ if (unlikely(ihash_delete(memhash, p, NULL) < 0)) {
+ goto hashdel_failed;
+ }
+ if (!do_add) {
+ return;
+ }
+ sz_real = mini_alloc_size(size);
+ } else {
+ big_free(p);
+ if (unlikely(ihash_delete(memhash, p, NULL) < 0)) {
+ goto hashdel_failed;
+ }
+ if (!do_add) {
+ return;
+ }
+ sz_real = big_alloc_size(size);
+ }
+ mem_current_tight -= size;
+ mem_current_real -= sz_real;
+ return;
+
+ hashdel_failed:
+ ERR("memhash delete failed");
+ return;
+}
+
+void *
+hp_realloc(void *p, __u32 newsize)
+{
+ __u32 sz, sz_real, newsz_real, do_add;
+ void *inf, *newp;
+
+ if (unlikely(newsize == 0 || newsize >= 0x80000000 || p == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ if (unlikely(memhash == NULL)) {
+ ERR("hp_realloc called before hp_alloc");
+ return NULL;
+ }
+ inf = ihash_lookup_val(memhash, p);
+ if (unlikely(inf == NULL)) {
+ ERR("pointer %p not in memhash", p);
+ return NULL;
+ }
+ sz = ptr_to_val(inf);
+ do_add = sz & 0x80000000;
+ sz &= 0x7FFFFFFF;
+ sz_real = sz <= PAGE_SIZE ? mini_alloc_size(sz) : big_alloc_size(sz);
+ if (newsize <= PAGE_SIZE) {
+ newsz_real = mini_alloc_size(newsize);
+ if (sz_real == newsz_real) {
+ goto only_size_change;
+ }
+ if (unlikely(do_add && mem_current_real + newsz_real >
+ mem_max + sz_real)) {
+ if (newsize <= sz) {
+ goto only_size_change;
+ }
+ goto mem_max_reached;
+ }
+ newp = mini_alloc(newsize);
+ } else {
+ newsz_real = big_alloc_size(newsize);
+ if (sz_real == newsz_real) {
+ goto only_size_change;
+ }
+ if (unlikely(do_add && mem_current_real + newsz_real >
+ mem_max + sz_real)) {
+ if (newsize <= sz) {
+ goto only_size_change;
+ }
+ goto mem_max_reached;
+ }
+ newp = big_alloc(newsize);
+ }
+ if (newp == NULL) {
+ if (newsize <= sz) {
+ goto only_size_change;
+ }
+ return NULL;
+ }
+ if (unlikely(ihash_replace(&memhash, p, NULL, newp,
+ val_to_ptr(((!!do_add) << 31) |
+ newsize)) < 0)) {
+ if (newsize <= PAGE_SIZE) {
+ mini_free(newp);
+ } else {
+ big_free(newp);
+ }
+ if (newsize <= sz) {
+ goto only_size_change;
+ }
+ return NULL;
+ }
+ memcpy(newp, p, sz < newsize ? sz : newsize);
+ if (sz <= PAGE_SIZE) {
+ mini_free(p);
+ } else {
+ big_free(p);
+ }
+ if (do_add) {
+ mem_current_tight += newsize;
+ mem_current_tight -= sz;
+ mem_current_real += newsz_real;
+ mem_current_real -= sz_real;
+ }
+ return newp;
+
+ mem_max_reached:
+ return NULL;
+
+ only_size_change:
+ if (unlikely(ihash_replace(&memhash, p, NULL, p,
+ val_to_ptr(((!!do_add) << 31) |
+ newsize)) < 0)) {
+ ERR("unable to replace memhash entry");
+ return NULL;
+ }
+ if (do_add) {
+ mem_current_tight += newsize;
+ mem_current_tight -= sz;
+ }
+ return p;
+}
+
+hipac_error
+hp_size(void *p, __u64 *size_real, __u64 *size_tight)
+{
+ void *inf;
+ __u32 size;
+
+ if (unlikely(size_real == NULL || size_tight == NULL)) {
+ ARG_ERR;
+ }
+ if (unlikely(p == NULL)) {
+ return HE_OK;
+ }
+ inf = ihash_lookup_val(memhash, p);
+ if (unlikely(inf == NULL)) {
+ IMPOSSIBLE_CONDITION("size request for unknown pointer");
+ }
+ size = ((__u32) ptr_to_val(inf)) & 0x7FFFFFFF;
+ *size_tight += size;
+ *size_real += size <= PAGE_SIZE ? mini_alloc_size(size) :
+ big_alloc_size(size);
+ return HE_OK;
+}
+
+void
+hp_mem_exit(void)
+{
+ if (unlikely(memhash == NULL)) {
+ return;
+ }
+ if (unlikely(memhash->elem_ct != 0)) {
+ WARN_("memhash still contains unfreed pointers");
+ }
+ if (unlikely(mem_current_tight != 0)) {
+ WARN_("mem_current_tight is not 0");
+ }
+ if (unlikely(mem_current_real != 0)) {
+ WARN_("mem_current_real is not 0");
+ }
+ ihash_free(memhash);
+ memhash = NULL;
+}
+
+hipac_error
+hipac_get_mem_stat(struct hipac_mem_stat *stat)
+{
+ struct ihash_stat istat;
+
+ if (stat == NULL) {
+ ARG_ERR;
+ }
+ if (sizeof(istat.bucket_dist) != sizeof(stat->memhash_bucket_stat)) {
+ IMPOSSIBLE_CONDITION("struct ihash_stat and struct "
+ "hipac_mem_stat incompatible");
+ }
+ if (ihash_stat(memhash, &istat) < 0) {
+ IMPOSSIBLE_CONDITION("ihash_stat failed");
+ }
+
+ stat->total_mem_tight = mem_current_tight;
+ stat->total_mem_real = mem_current_real;
+ stat->memhash_elem_num = istat.elem_ct;
+ stat->memhash_len = istat.bucket_len;
+ stat->memhash_smallest_bucket_len = istat.small_bucket_len;
+ stat->memhash_biggest_bucket_len = istat.big_bucket_len;
+ memcpy(stat->memhash_bucket_stat, istat.bucket_dist,
+ sizeof(istat.bucket_dist));
+ return HE_OK;
+}
+
+
+
+/*
+ * statistical distributions
+ */
+
+void
+stat_distribution_add(__u32 dist[], __u32 len, __u32 val)
+{
+ __u32 i;
+
+ if (unlikely(dist == NULL || len == 0)) {
+ ARG_MSG;
+ return;
+ }
+
+ for (i = 0; i < len - 1; i++) {
+ if (val <= (1 << i) - 1) {
+ dist[i]++;
+ return;
+ }
+ }
+ dist[i]++;
+}
+
+
+
+/*
+ * pointer block
+ */
+
+struct ptrblock *
+ptrblock_new(void *p, int do_add)
+{
+ struct ptrblock *new;
+
+ if (unlikely(p == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ new = hp_alloc(sizeof(*new) + sizeof(*new->p), do_add);
+ if (new == NULL) {
+ return NULL;
+ }
+ new->len = 1;
+ new->p[0] = p;
+ return new;
+}
+
+int
+ptrblock_eq(const struct ptrblock *b1, const struct ptrblock *b2)
+{
+ __u32 i;
+
+ if (b1 == b2) {
+ return 1;
+ }
+ if (b1 == NULL || b2 == NULL || b1->len != b2->len) {
+ return 0;
+ }
+ /* b1->len == 0 is valid if b1 and b2 are embedded ptrblocks */
+ for (i = 0; i < b1->len; i++) {
+ if (b1->p[i] != b2->p[i]) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+hipac_error
+ptrblock_clone(struct ptrblock *b, struct ptrblock **clone)
+{
+ __u32 sz;
+
+ if (unlikely(clone == NULL)) {
+ ARG_ERR;
+ }
+
+ if (b == NULL) {
+ *clone = NULL;
+ return HE_OK;
+ }
+ sz = ptrblock_size(b);
+ *clone = hp_alloc(sz, 1);
+ if (*clone == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(*clone, b, sz);
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_insert(struct ptrblock **b, void *p, __u32 pos)
+{
+ struct ptrblock *new;
+
+ if (unlikely(p == NULL || b == NULL || (*b == NULL && pos > 0) ||
+ (*b != NULL && pos > (*b)->len))) {
+ ARG_ERR;
+ }
+
+ if (*b == NULL) {
+ new = ptrblock_new(p, 1);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ *b = new;
+ return HE_OK;
+ }
+ new = hp_realloc(*b, sizeof(**b) + ((*b)->len + 1) * sizeof(*(*b)->p));
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ if (new->len > pos) {
+ memmove(&new->p[pos + 1], &new->p[pos], (new->len - pos) *
+ sizeof(*new->p));
+ }
+ new->len++;
+ new->p[pos] = p;
+ *b = new;
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_insert_embed(void **o, __u32 ptrblock_offset, void *p, __u32 pos)
+{
+ struct ptrblock *b;
+ void *new;
+
+ if (unlikely(o == NULL || *o == NULL || p == NULL ||
+ pos > ((struct ptrblock *)
+ ((char *) *o + ptrblock_offset))->len)) {
+ ARG_ERR;
+ }
+ b = (struct ptrblock *) ((char *) *o + ptrblock_offset);
+ new = hp_realloc(*o, ptrblock_offset + sizeof(*b) +
+ (b->len + 1) * sizeof(*b->p));
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ b = (struct ptrblock *) ((char *) new + ptrblock_offset);
+ if (b->len > pos) {
+ memmove(&b->p[pos + 1], &b->p[pos], (b->len - pos) *
+ sizeof(*b->p));
+ }
+ b->len++;
+ b->p[pos] = p;
+ *o = new;
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_append(struct ptrblock **b, void *p)
+{
+ struct ptrblock *new;
+
+ if (unlikely(p == NULL || b == NULL)) {
+ ARG_ERR;
+ }
+
+ if (*b == NULL) {
+ new = ptrblock_new(p, 1);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ *b = new;
+ return HE_OK;
+ }
+ new = hp_realloc(*b, sizeof(**b) + ((*b)->len + 1) * sizeof(*(*b)->p));
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+#ifdef DEBUG
+ {
+ __u32 i;
+ for (i = 0; i < new->len; i++) {
+ if (new->p[i] == p) {
+ IMPOSSIBLE_CONDITION("ptrblock contains "
+ "duplicated pointer");
+ }
+ }
+ }
+#endif
+ new->p[new->len++] = p;
+ *b = new;
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_delete_pos(struct ptrblock **b, __u32 pos)
+{
+ struct ptrblock *new;
+
+ if (unlikely(b == NULL || *b == NULL || pos >= (*b)->len)) {
+ ARG_ERR;
+ }
+
+ if ((*b)->len == 1) {
+ ptrblock_free(*b);
+ *b = NULL;
+ return HE_OK;
+ }
+ (*b)->len--;
+ if ((*b)->len > pos) {
+ memmove(&(*b)->p[pos], &(*b)->p[pos + 1],
+ ((*b)->len - pos) * sizeof(*(*b)->p));
+ }
+ new = hp_realloc(*b, sizeof(**b) + (*b)->len * sizeof(*(*b)->p));
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *b = new;
+ }
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_delete_pos_embed(void **o, __u32 ptrblock_offset, __u32 pos)
+{
+ struct ptrblock *new;
+ struct ptrblock *b;
+
+ if (unlikely(o == NULL || *o == NULL ||
+ pos >= ((struct ptrblock *)
+ ((char *) *o + ptrblock_offset))->len)) {
+ ARG_ERR;
+ }
+ b = (struct ptrblock *) ((char *) *o + ptrblock_offset);
+ b->len--;
+ if (b->len > pos) {
+ memmove(&b->p[pos], &b->p[pos + 1],
+ (b->len - pos) * sizeof(*b->p));
+ }
+ new = hp_realloc(*o, ptrblock_offset + sizeof(*b) +
+ b->len * sizeof(*b->p));
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *o = new;
+ }
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_delete(struct ptrblock **b, void *p)
+{
+ __u32 i;
+
+ if (unlikely(b == NULL || *b == NULL)) {
+ ARG_ERR;
+ }
+ for (i = 0; i < (*b)->len; i++) {
+ if ((*b)->p[i] == p) {
+ return ptrblock_delete_pos(b, i);
+ }
+ }
+ IMPOSSIBLE_CONDITION("pointer %p not in ptrblock", p);
+}
+
+hipac_error
+ptrblock_delete_tail(struct ptrblock **b)
+{
+ struct ptrblock *new;
+
+ if (unlikely(b == NULL || *b == NULL)) {
+ ARG_ERR;
+ }
+
+ if ((*b)->len == 1) {
+ ptrblock_free(*b);
+ *b = NULL;
+ return HE_OK;
+ }
+ (*b)->len--;
+ new = hp_realloc(*b, sizeof(**b) + (*b)->len * sizeof(*(*b)->p));
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *b = new;
+ }
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_delete_multi(struct ptrblock **b, const struct ptrblock *mark)
+{
+ struct ptrblock *new;
+ __u32 first, last, i;
+
+ if (unlikely(b == NULL || mark == NULL ||
+ (*b != NULL && mark->len < (*b)->len))) {
+ ARG_ERR;
+ }
+
+ if (*b == NULL) {
+ return HE_OK;
+ }
+ for (first = 0; first < (*b)->len && mark->p[first] != NULL; first++);
+ if (first == (*b)->len) {
+ /* nothing to delete */
+ return HE_OK;
+ }
+ for (last = first + 1, i = 0; last < (*b)->len; last++) {
+ if (mark->p[last] != NULL) {
+ continue;
+ }
+ if (last > first + 1) {
+ memmove(&(*b)->p[first - i], &(*b)->p[first + 1],
+ (last - first - 1) * sizeof(*(*b)->p));
+ }
+ i++;
+ first = last;
+ }
+ if ((*b)->len > first + 1) {
+ memmove(&(*b)->p[first - i], &(*b)->p[first + 1],
+ ((*b)->len - first - 1) * sizeof(*(*b)->p));
+ }
+ (*b)->len -= i + 1;
+ if ((*b)->len == 0) {
+ ptrblock_free(*b);
+ *b = NULL;
+ return HE_OK;
+ }
+ new = hp_realloc(*b, sizeof(**b) + (*b)->len * sizeof(*(*b)->p));
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *b = new;
+ }
+ return HE_OK;
+}
+
+hipac_error
+ptrblock_delete_null(struct ptrblock **b)
+{
+ struct ptrblock *new;
+ __u32 first, last, i;
+
+ if (unlikely(b == NULL)) {
+ ARG_ERR;
+ }
+
+ if (*b == NULL) {
+ return HE_OK;
+ }
+ for (first = 0; first < (*b)->len && (*b)->p[first] != NULL; first++);
+ if (first == (*b)->len) {
+ /* nothing to delete */
+ return HE_OK;
+ }
+ for (last = first + 1, i = 0; last < (*b)->len; last++) {
+ if ((*b)->p[last] != NULL) {
+ continue;
+ }
+ if (last > first + 1) {
+ memmove(&(*b)->p[first - i], &(*b)->p[first + 1],
+ (last - first - 1) * sizeof(*(*b)->p));
+ }
+ i++;
+ first = last;
+ }
+ if ((*b)->len > first + 1) {
+ memmove(&(*b)->p[first - i], &(*b)->p[first + 1],
+ ((*b)->len - first - 1) * sizeof(*(*b)->p));
+ }
+ (*b)->len -= i + 1;
+ if ((*b)->len == 0) {
+ ptrblock_free(*b);
+ *b = NULL;
+ return HE_OK;
+ }
+ new = hp_realloc(*b, sizeof(**b) + (*b)->len * sizeof(*(*b)->p));
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *b = new;
+ }
+ return HE_OK;
+}
+
+
+
+/*
+ * block of structs
+ */
+struct strblock *
+strblock_new(const void *s, __u32 size, int do_add)
+{
+ struct strblock *new;
+
+ if (unlikely(s == NULL || size == 0)) {
+ ARG_MSG;
+ return NULL;
+ }
+ new = hp_alloc(sizeof(*new) + size, do_add);
+ if (new == NULL) {
+ return NULL;
+ }
+ new->len = 1;
+ new->size = size;
+ memcpy(new->d, s, size);
+ return new;
+}
+
+int
+strblock_eq(const struct strblock *b1, const struct strblock *b2,
+ int (* eq) (void *, void *))
+{
+ __u32 i;
+
+ if (b1 == b2) {
+ return 1;
+ }
+ if (b1 == NULL || b2 == NULL || b1->len != b2->len ||
+ b1->size != b2->size) {
+ return 0;
+ }
+ assert(b1->len > 0);
+ for (i = 0; i < b1->len; i++) {
+ if (!eq(STRBLOCK_ITH(b1, i, void *),
+ STRBLOCK_ITH(b2, i, void *))) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+hipac_error
+strblock_clone(struct strblock *b, struct strblock **clone)
+{
+ __u32 sz;
+
+ if (unlikely(clone == NULL)) {
+ ARG_ERR;
+ }
+
+ if (b == NULL) {
+ *clone = NULL;
+ return HE_OK;
+ }
+ sz = strblock_size(b);
+ *clone = hp_alloc(sz, 1);
+ if (*clone == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(*clone, b, sz);
+ return HE_OK;
+}
+
+hipac_error
+strblock_insert(struct strblock **b, const void *s, __u32 size, __u32 pos)
+{
+ struct strblock *new;
+
+ if (unlikely(s == NULL || b == NULL ||
+ (*b == NULL && (pos > 0 || size == 0)) ||
+ (*b != NULL && (pos > (*b)->len ||
+ (*b)->size != size)))) {
+ ARG_ERR;
+ }
+
+ if (*b == NULL) {
+ new = strblock_new(s, size, 1);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ *b = new;
+ return HE_OK;
+ }
+ new = hp_realloc(*b, sizeof(**b) + ((*b)->len + 1) * size);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ if (new->len > pos) {
+ memmove(STRBLOCK_ITH(new, pos + 1, void *),
+ STRBLOCK_ITH(new, pos, void *),
+ (new->len - pos) * size);
+ }
+ new->len++;
+ memcpy(STRBLOCK_ITH(new, pos, void *), s, size);
+ *b = new;
+ return HE_OK;
+}
+
+hipac_error
+strblock_append(struct strblock **b, const void *s, __u32 size)
+{
+ struct strblock *new;
+
+ if (unlikely(s == NULL || b == NULL || (*b == NULL && size == 0) ||
+ (*b != NULL && (*b)->size != size))) {
+ ARG_ERR;
+ }
+
+ if (*b == NULL) {
+ new = strblock_new(s, size, 1);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ *b = new;
+ return HE_OK;
+ }
+ new = hp_realloc(*b, sizeof(**b) + ((*b)->len + 1) * size);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(STRBLOCK_ITH(new, new->len, void *), s, size);
+ new->len++;
+ *b = new;
+ return HE_OK;
+}
+
+hipac_error
+strblock_delete_pos(struct strblock **b, __u32 pos)
+{
+ struct strblock *new;
+
+ if (unlikely(b == NULL || *b == NULL || pos >= (*b)->len)) {
+ ARG_ERR;
+ }
+
+ if ((*b)->len == 1) {
+ strblock_free(*b);
+ *b = NULL;
+ return HE_OK;
+ }
+ (*b)->len--;
+ if ((*b)->len > pos) {
+ memmove(STRBLOCK_ITH(*b, pos, void *),
+ STRBLOCK_ITH(*b, pos + 1, void *),
+ ((*b)->len - pos) * (*b)->size);
+ }
+ new = hp_realloc(*b, sizeof(**b) + (*b)->len * (*b)->size);
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *b = new;
+ }
+ return HE_OK;
+}
+
+hipac_error
+strblock_delete_tail(struct strblock **b)
+{
+ struct strblock *new;
+
+ if (unlikely(b == NULL || *b == NULL)) {
+ ARG_ERR;
+ }
+
+ if ((*b)->len == 1) {
+ strblock_free(*b);
+ *b = NULL;
+ return HE_OK;
+ }
+ (*b)->len--;
+ new = hp_realloc(*b, sizeof(**b) + (*b)->len * (*b)->size);
+ if (new == NULL) {
+ WARN_("hp_realloc returns NULL although less memory was "
+ "requested");
+ } else {
+ *b = new;
+ }
+ return HE_OK;
+}
+
+
+
+/*
+ * pointer list
+ */
+
+struct ptrlist *
+ptrlist_new(void)
+{
+ struct ptrlist *new;
+
+ new = mini_alloc(sizeof(*new));
+ if (new == NULL) {
+ return NULL;
+ }
+ new->len = 0;
+ INIT_LIST_HEAD(&new->head);
+ return new;
+}
+
+struct ptrlist_entry *
+ptrlist_new_entry(void *p)
+{
+ struct ptrlist_entry *new;
+
+ new = mini_alloc(sizeof(*new));
+ if (new == NULL) {
+ return NULL;
+ }
+ new->p = p;
+ return new;
+}
+
+void
+ptrlist_flush(struct ptrlist *l)
+{
+ struct list_head *lh;
+
+ if (unlikely(l == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ for (lh = l->head.next; lh != &l->head;) {
+ lh = lh->next;
+ mini_free(list_entry(lh->prev, struct ptrlist_entry, head));
+ }
+ INIT_LIST_HEAD(&l->head);
+ l->len = 0;
+}
+
+void
+ptrlist_free(struct ptrlist *l)
+{
+ struct list_head *lh;
+
+ if (unlikely(l == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ for (lh = l->head.next; lh != &l->head;) {
+ lh = lh->next;
+ mini_free(list_entry(lh->prev, struct ptrlist_entry, head));
+ }
+ mini_free(l);
+}
+
+hipac_error
+ptrlist_add(struct ptrlist *l, void *p, int check_dup)
+{
+ struct list_head *lh;
+ struct ptrlist_entry* e;
+
+ if (unlikely(l == NULL || p == NULL)) {
+ ARG_ERR;
+ }
+ if (unlikely(check_dup)) {
+ list_for_each(lh, &l->head) {
+ e = list_entry(lh, struct ptrlist_entry, head);
+ if (e->p == p) {
+ IMPOSSIBLE_CONDITION("pointer %p already in "
+ "ptrlist", p);
+ }
+ }
+ }
+ e = mini_alloc(sizeof(*e));
+ if (e == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ e->p = p;
+ list_add_tail(&e->head, &l->head);
+ l->len++;
+ return HE_OK;
+}
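
Before moving on to global.h, a short illustrative sketch of how the hp_* wrappers above account memory; it is not part of the patch. With do_add != 0 an allocation is charged against mem_max and reflected in mem_current_tight (bytes requested) and mem_current_real (bytes actually handed out by mini_alloc/big_alloc); with do_add == 0 the pointer is only tracked in the memhash. The function name hp_accounting_sketch and the sizes are invented for the example.

static void
hp_accounting_sketch(void)
{
        void *p, *np;
        __u64 real = 0, tight = 0;

        /* charged allocation: mem_current_tight += 100, mem_current_real +=
           the rounded-up mini_alloc size */
        p = hp_alloc(100, 1);
        if (p == NULL)
                return;                 /* mem_max reached or out of memory */

        /* adds the requested/allocated byte counts of p to tight/real */
        if (hp_size(p, &real, &tight) != HE_OK)
                goto out;

        /* hp_realloc keeps the accounting consistent; on failure the old
           block stays valid, so keep using p */
        np = hp_realloc(p, 200);
        if (np != NULL)
                p = np;
 out:
        hp_free(p);                     /* drops the charge again */
}
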
diff -urN nf-hipac/kernel/global.h nfhipac/kernel/global.h
--- nf-hipac/kernel/global.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/global.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,388 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _GLOBAL_H
+#define _GLOBAL_H
+//#define DEBUG 1
+
+#include "mode.h"
+#include "hipac.h" // hipac_error
+
+#define INITIAL_MEMHASH_LEN 64
+#define MEMHASH_AVRG_ELEM_PER_BUCKET 15
+#define INITIAL_NEWSPEC_LEN 1024
+#define NEWSPEC_AVRG_ELEM_PER_BUCKET 3
+
+#ifdef DEBUG
+# define DPRINT(type, str, args...) if ((type) & hipac_debug) \
+ printk(str , ## args)
+#else
+# define DPRINT(type, str, args...) do {} while (0)
+#endif
+
+/* the single space before the last ',' is vital to make this macro work the
+ expected way because of some idiosyncrasy of gcc */
+#ifdef DEBUG
+# define MSG(type, str, args...) \
+ printk(type "%-15s : %-30s : %6d : " str "\n", __FILE__, __FUNCTION__, \
+ __LINE__ , ## args)
+#else
+# define MSG(type, str, args...) \
+ printk("%s:%s:%d: " str "\n", __FILE__, __FUNCTION__, \
+ __LINE__ , ## args)
+#endif
+
+#define ERR(str, args...) MSG(KERN_ERR, str , ## args)
+#define WARN_(str, args...) MSG(KERN_WARNING, str , ## args)
+#define NOTICE(str, args...) MSG(KERN_NOTICE, str , ## args)
+#define DBG(str, args...) MSG(KERN_DEBUG, str , ## args)
+#define ARG_MSG MSG(KERN_ERR, "function arguments invalid")
+
+#define ARG_ERR \
+do { \
+ MSG(KERN_ERR, "function arguments invalid"); \
+ return HE_IMPOSSIBLE_CONDITION; \
+} while (0)
+
+#define IMPOSSIBLE_CONDITION(str, args...) \
+do { \
+ MSG(KERN_ERR, str , ## args); \
+ return HE_IMPOSSIBLE_CONDITION; \
+} while (0)
+
+
+
+/* generic header for dimtree rules, elementary intervals and rlps */
+struct gen_spec
+{
+ unsigned rlp : 1;
+};
+
+/* dimid to bittype array */
+extern __u8 *dim2btype;
+
+/* match executor function */
+extern hipac_match_exec_t match_fn;
+
+/* target executor function */
+extern hipac_target_exec_t target_fn;
+
+/* dimension extractor function */
+extern hipac_extract_t *extract_fn;
+
+
+
+/*
+ * memory management wrappers
+ */
+
+/* upper bound for memory consumption in bytes */
+extern __u64 mem_max;
+
+/* current memory consumption in bytes in terms of how much
+ has been requested */
+extern __u64 mem_current_tight;
+
+/* current memory consumption in bytes in terms of how much
+ has actually been allocated */
+extern __u64 mem_current_real;
+
+/* do_add indicates whether mem_current_tight and mem_current_real
+ should be updated or not */
+void *
+hp_alloc(__u32 size, int do_add);
+
+void
+hp_free(void *p);
+
+void *
+hp_realloc(void *p, __u32 newsize);
+
+/* add the number of bytes requested for p to *size_tight and the number
+ of bytes allocated for p to *size_real; if p is NULL, size_tight and
+ size_real are not modified;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hp_size(void *p, __u64 *size_real, __u64 *size_tight);
+
+/* internal memhash is freed; if it is not empty a warning is printed */
+void
+hp_mem_exit(void);
+
+
+
+/*
+ * statistical distributions
+ */
+
+/* initialize statistical distribution dist of length len, i.e. set it to 0 */
+static inline void
+stat_distribution_init(__u32 dist[], __u32 len)
+{
+ if (unlikely(dist == NULL || len == 0)) {
+ ARG_MSG;
+ return;
+ }
+ memset(dist, 0, len * sizeof(*dist));
+}
+
+/* dist is an array of length len representing a statistical distribution;
+ val is added to dist */
+void
+stat_distribution_add(__u32 dist[], __u32 len, __u32 val);
+
+
+
+/*
+ * pointer block
+ */
+struct ptrblock
+{
+ __u32 len;
+ void *p[0];
+};
+
+/* return new pointer block with p as the only element; do_add indicates
+ whether mem_current_tight and mem_current_real should be updated or not */
+struct ptrblock *
+ptrblock_new(void *p, int do_add);
+
+static inline void
+ptrblock_free(struct ptrblock *b)
+{
+ hp_free(b);
+}
+
+static inline __u32
+ptrblock_size(const struct ptrblock *b)
+{
+ if (unlikely(b == NULL)) {
+ ARG_MSG;
+ return 0;
+ }
+ return sizeof(*b) + b->len * sizeof(*b->p);
+}
+
+/* returns 1 if b1 and b2 are equal and 0 otherwise; b1->len or b2->len might
+ be 0 in order to allow an equality test on embedded ptrblocks */
+int
+ptrblock_eq(const struct ptrblock *b1, const struct ptrblock *b2);
+
+/* clone b and store the result in clone; the memory for clone is allocated
+ via hp_alloc (with do_add = 1) if necessary; b might be NULL;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_clone(struct ptrblock *b, struct ptrblock **clone);
+
+/* insert p into b at position pos; if *b is NULL and pos is 0
+ ptrblock_new(p, 1) is called and the result is assigned to b;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_insert(struct ptrblock **b, void *p, __u32 pos);
+
+/* insert p into (struct ptrblock *) ((char *) *o + ptrblock_offset)
+ at position pos; o is assumed to end after the embedded ptrblock;
+ hp_realloc is used to resize o;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_insert_embed(void **o, __u32 ptrblock_offset, void *p, __u32 pos);
+
+/* append p to b; if *b is NULL ptrblock_new(p, 1) is called and the result
+ is assigned to b;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_append(struct ptrblock **b, void *p);
+
+/* delete pointer at position pos in b; if b contains only one element and
+ pos is 0 then *b is freed and NULL is assigned to *b;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_delete_pos(struct ptrblock **b, __u32 pos);
+
+/* delete pointer at position pos in
+ (struct ptrblock *) ((char *) *o + ptrblock_offset); o is assumed to end
+ after the embedded ptrblock; hp_realloc is used to resize o;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_delete_pos_embed(void **o, __u32 ptrblock_offset, __u32 pos);
+
+/* delete p in b; if p is the only element in b then *b is freed and NULL
+ is assigned to *b;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_delete(struct ptrblock **b, void *p);
+
+/* delete trailing pointer in b; if b contains only one element then *b is
+ freed and NULL is assigned to *b;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_delete_tail(struct ptrblock **b);
+
+/* for all mark->p[i] == NULL: delete the pointer at position i from b;
+ if b is empty after the delete operation NULL is assigned to *b;
+ note that mark->len must be >= (*b)->len;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_delete_multi(struct ptrblock **b, const struct ptrblock *mark);
+
+/* similar to ptrblock_delete_multi: the pointers in b which are NULL are
+ deleted;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrblock_delete_null(struct ptrblock **b);
+
+
+
+/*
+ * block of structs
+ */
+struct strblock
+{
+ __u32 len, size;
+ char d[0];
+};
+
+#define STRBLOCK_ITH(b, i, cast) ((cast) ((b)->d + (i) * (b)->size))
+
+/* return new struct block with s as the only element; size is the size of
+ the struct pointed to by s in bytes; do_add indicates whether
+ mem_current_tight and mem_current_real should be updated or not */
+struct strblock *
+strblock_new(const void *s, __u32 size, int do_add);
+
+static inline void
+strblock_free(struct strblock *b)
+{
+ hp_free(b);
+}
+
+static inline __u32
+strblock_size(const struct strblock *b)
+{
+ if (unlikely(b == NULL)) {
+ ARG_MSG;
+ return 0;
+ }
+ return sizeof(*b) + b->len * b->size;
+}
+
+/* returns 1 if b1 and b2 are equal and 0 otherwise; eq is an equality test
+ function for the embedded structs; eq(a, b) returns 1 if a equals b
+ and 0 otherwise */
+int
+strblock_eq(const struct strblock *b1, const struct strblock *b2,
+ int (* eq) (void *, void *));
+
+/* clone b and store the result in clone; the memory for clone is allocated
+ via hp_alloc (with do_add = 1) if necessary; b might be NULL;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+strblock_clone(struct strblock *b, struct strblock **clone);
+
+/* insert struct s into b at position pos; if *b is NULL and pos is 0
+ strblock_new(s, size, 1) is called and the result is assigned to b;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+strblock_insert(struct strblock **b, const void *s, __u32 size, __u32 pos);
+
+/* append struct s to b; if *b is NULL then strblock_new(s, size, 1) is
+ called and the result is assigned to b;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+strblock_append(struct strblock **b, const void *s, __u32 size);
+
+/* delete struct at position pos in b; if b contains only one element and
+ pos is 0 then *b is freed and NULL is assigned to *b;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+strblock_delete_pos(struct strblock **b, __u32 pos);
+
+/* delete trailing struct in b; if b contains only one element then *b is
+ freed and NULL is assigned to *b;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+strblock_delete_tail(struct strblock **b);
+
+
+
+/*
+ * pointer list
+ */
+struct ptrlist
+{
+ struct list_head head;
+ __u32 len;
+};
+
+struct ptrlist_entry
+{
+ struct list_head head;
+ void *p;
+};
+
+/* return new empty pointer list or NULL if allocation fails */
+struct ptrlist *
+ptrlist_new(void);
+
+/* return new pointer list entry containing p or NULL if allocation fails */
+struct ptrlist_entry *
+ptrlist_new_entry(void *p);
+
+/* free all entries from the pointer list l */
+void
+ptrlist_flush(struct ptrlist *l);
+
+/* free all entries from the pointer list l and l itself */
+void
+ptrlist_free(struct ptrlist *l);
+
+/* free ptrlist entry */
+static inline void
+ptrlist_free_entry(struct ptrlist_entry *e)
+{
+ if (unlikely(e == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ list_del(&e->head);
+ mini_free(e);
+}
+
+/* return 1 if l is empty and 0 otherwise */
+static inline int
+ptrlist_is_empty(const struct ptrlist *l)
+{
+ if (unlikely(l == NULL)) {
+ ARG_MSG;
+ return 0;
+ }
+ assert((l->len != 0 || l->head.next == &l->head) &&
+ (l->head.next != &l->head || l->len == 0));
+ return l->len == 0;
+}
+
+/* add a new pointer list entry containing p to l; if check_dup is not 0
+ the new entry is only added if p is not already contained in a list
+ entry;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ptrlist_add(struct ptrlist *l, void *p, int check_dup);
+
+#endif
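
To make the ptrblock conventions above concrete, here is a minimal usage sketch; it is illustrative only and not part of the patch. It assumes a and b are two distinct, non-NULL pointers; the function name ptrblock_usage_sketch is invented. The key convention it shows: a block starts out as NULL, every growing or shrinking call may reallocate it (so the caller always passes the address of the block pointer), and deleting the last element collapses it back to NULL.

static hipac_error
ptrblock_usage_sketch(void *a, void *b)
{
        struct ptrblock *blk = NULL;
        hipac_error err;

        /* appending to a NULL block creates it via ptrblock_new(a, 1) */
        err = ptrblock_append(&blk, a);
        if (err != HE_OK)
                return err;

        /* insert b at position 0, so b now precedes a */
        err = ptrblock_insert(&blk, b, 0);
        if (err != HE_OK) {
                ptrblock_free(blk);
                return err;
        }
        assert(blk->len == 2 && blk->p[0] == b && blk->p[1] == a);

        /* delete by value, then drop the last element: blk becomes NULL */
        err = ptrblock_delete(&blk, a);
        if (err == HE_OK)
                err = ptrblock_delete_tail(&blk);
        return err;
}
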
diff -urN nf-hipac/kernel/hipac.c nfhipac/kernel/hipac.c
--- nf-hipac/kernel/hipac.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/hipac.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,3750 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include "hipac.h"
+#include "global.h"
+#include "ihash.h"
+#include "dimtree.h"
+
+
+static struct hipac_chain *current_chain = NULL;
+static struct ihash* chain_hash = NULL;
+static struct ptrblock* native_dts = NULL;
+
+__u8 *dim2btype;
+__u8 d2blen;
+hipac_extract_t *extract_fn;
+static hipac_copy_constructor_t copy_fn;
+static hipac_destroy_exec_t destroy_fn;
+hipac_match_exec_t match_fn;
+hipac_target_exec_t target_fn;
+static hipac_eq_exec_t eq_fn;
+
+
+
+/*
+ * Some helpful defines in order to make code more readable
+ */
+
+#define DONT_COMMIT 0
+#define COMMIT 1
+#define DONT_INC 0
+#define INC 1
+#define DONT_ADD 0
+#define ADD 1
+#define ORIGIN_ALL 0xffff
+
+#define CHAIN_IS_REFERENCED(chain) ((chain)->ref_count != 0)
+#define CHAIN_NOT_CONNECTED(chain) ((chain)->start == NULL)
+#define IS_ROOT_CHAIN(chain) ((chain)->dimtree != NULL)
+#define IS_NOT_JUMP_RULE(rule) ((rule)->r.action != TARGET_CHAIN)
+#define IS_JUMP_RULE(rule) ((rule)->r.action == TARGET_CHAIN)
+
+#define P_ELEM(x, i) (STRBLOCK_ITH(x, i, struct path_ *))
+#define P_ELEM_DIMTREE(x, i) (STRBLOCK_ITH(x, i, struct path_ *)->dimtree)
+#define P_ELEM_PREV(x, i) (STRBLOCK_ITH(x, i, struct path_ *)->prev)
+#define P_ELEM_RULE(x, i) (STRBLOCK_ITH(x, i, struct path_ *)->rule)
+
+
+#define CHAIN_HASH_LEN 16
+#define CHAIN_HASH_AVR_BUCKET 3
+#define HIPAC_REC_LIMIT 10
+
+
+#ifdef DEBUG
+# define LOW_MEM(args...) do { NOTICE(args); return HE_LOW_MEMORY; \
+ } while (0)
+# define CHECK_ERROR(func) \
+ if (error == HE_LOW_MEMORY) { \
+ NOTICE(func " returned LOW_MEMORY error!"); \
+ } else if (error == HE_IMPOSSIBLE_CONDITION) { \
+ ERR(func " returned IMPOSSIBLE_CONDITION error!"); \
+ }
+
+ static inline hipac_error
+ strblock_append_check(struct strblock **b, const void *s, __u32 size){
+ __u32 i;
+ if (*b)
+ for (i = 0; i < (*b)->len; i++){
+ if (!(memcmp(STRBLOCK_ITH(*b, i, void *),
+ s, size)))
+ IMPOSSIBLE_CONDITION(
+ "already in strblock");
+ }
+ return strblock_append(b, s, size);
+ }
+
+#else
+# define LOW_MEM(args...) return HE_LOW_MEMORY
+# define CHECK_ERROR(func) \
+ if (error == HE_IMPOSSIBLE_CONDITION) { \
+ ERR(func " returned IMPOSSIBLE_CONDITION error!"); \
+ }
+
+ static inline hipac_error
+ strblock_append_check(struct strblock **b, const void *s, __u32 size){
+ return strblock_append(b, s, size);
+ }
+#endif
+
+
+
+
+/* element in strblock next_chain in struct hipac_chain
+ means that current chain contains 'count' >= 1 rules
+ that jump to chain 'chain' */
+struct next_chain_elem
+{
+ __u32 count;
+ struct hipac_chain *chain;
+};
+
+
+/* the combined rule of all the chain_rules on the path
+ from a ROOT_CHAIN to the current chain */
+struct prefix_rule
+{
+ __u32 origin;
+ struct ptrblock *exec_matches;
+ __u8 native_mct;
+ struct hipac_match first_match[0];
+};
+
+
+/* the path from a ROOT_CHAIN to the current chain;
+ dimtree: the dimtree corresponding to the ROOT of that path
+ prev: the previous chain_rule on that path
+ rule: the combined rule of all the chain_rules on that path */
+struct path_
+{
+ struct dimtree *dimtree;
+ struct chain_rule *prev;
+ struct prefix_rule *rule;
+};
+
+
+/* hipac_chain is the 'head' of the doubly linked list of chain_rules;
+ name: the name of the chain
+ ref_count: the number of rules that jump to this chain
+ next_chains: block of next_chain_elem structs; each chain that is
+ jumped to from a rule in this chain has its own
+ next_chain_elem in this block with its 'count' field set to
+ the number of rules that jump to that chain
+ paths: block of all the paths from any ROOT_CHAIN to this chain
+ start: contains pointers to dt_rules that mark the beginning
+ of this chain in the internal dt_chain
+ end: the same for the ending of the chain
+ dimtree: points to a dimtree if chain is a ROOT_CHAIN,
+ otherwise it's NULL */
+struct hipac_chain
+{
+ struct list_head head;
+ char name[HIPAC_CHAIN_NAME_MAX_LEN];
+ __u32 list_pos;
+ __u32 ref_count;
+ struct strblock *next_chains;
+ struct strblock *paths;
+ struct ptrblock *start;
+ struct ptrblock *end;
+ struct dimtree *dimtree;
+};
+
+/* chain_rule is contained in a cyclic doubly linked list of rules where the
+ 'head' of the list is of type struct hipac_chain;
+ dtr: contains pointers to dt_rules in the internal dt_chain that correspond
+ to this chain_rule */
+struct chain_rule
+{
+ struct list_head head;
+ struct ptrblock *dtr;
+ struct hipac_rule r;
+};
+
+
+
+
+
+/*
+ * Several functions to free certain structs.
+ * The functions recursively free all other data structures that
+ * are pointed to from within the structs.
+ */
+
+static inline void
+dt_rule_free(struct dt_rule *rule)
+{
+ if (rule->exec_match)
+ ptrblock_free(rule->exec_match);
+ hp_free(rule);
+}
+
+static inline void
+hipac_rule_free(struct hipac_rule *rule)
+{
+ destroy_fn(rule);
+ hp_free(rule);
+}
+
+static inline void
+chain_rule_free(struct chain_rule *rule)
+{
+ if (rule->dtr)
+ ptrblock_free(rule->dtr);
+ hp_free(rule);
+}
+
+static inline void
+chain_rule_destroy(struct chain_rule *rule)
+{
+ if (rule->dtr)
+ ptrblock_free(rule->dtr);
+ destroy_fn(&rule->r);
+ hp_free(rule);
+}
+
+static inline void
+prefix_rule_free(struct prefix_rule *p)
+{
+ if (p->exec_matches)
+ ptrblock_free(p->exec_matches);
+ hp_free(p);
+}
+
+static inline void
+path_free(struct path_ *p)
+{
+ if (p->rule)
+ prefix_rule_free(p->rule);
+ hp_free(p);
+}
+
+static inline void
+paths_free(struct strblock *paths)
+{
+ __u32 i;
+ for (i = 0; i < paths->len; i++)
+ prefix_rule_free(P_ELEM_RULE(paths, i));
+ strblock_free(paths);
+}
+
+/* End of free functions */
+
+
+
+
+
+
+
+
+/*
+ * chain_hash_* functions
+ */
+
+
+/* insert 'chain' into the global hash of all chains ('chain_hash')
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+chain_hash_insert(struct hipac_chain* chain)
+{
+ return ihash_insert(&chain_hash, chain->name, chain);//IS_THIS_CORRECT?
+}
+
+
+
+/* remove 'chain' from the global hash of all chains ('chain_hash')
+ the removed chain is not freed */
+static inline void
+chain_hash_remove(struct hipac_chain* chain)
+{
+ if (current_chain && current_chain == chain)
+ current_chain = NULL;
+ ihash_delete(chain_hash, chain->name, NULL);
+}
+
+
+
+/* replace 'org' with 'new' in global hash of all chains
+ the replaced chain is not freed */
+static inline hipac_error
+chain_hash_replace(struct hipac_chain *org, struct hipac_chain *new)
+{
+ if (current_chain && current_chain == org)
+ current_chain = NULL;
+ return ihash_replace(&chain_hash, org->name, NULL, new->name, new);
+}
+
+
+
+/* lookup 'chain' with name 'name' in global 'chain_hash',
+ the hash of all chains.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+chain_hash_lookup(const char* name, struct hipac_chain **chain)
+{
+ if (unlikely(!name || !chain))
+ ARG_ERR;
+ if ((current_chain) &&
+ (!strcmp(name, current_chain->name))){
+ *chain = current_chain;
+ return HE_OK;
+ }
+ *chain = (struct hipac_chain*) ihash_lookup(chain_hash, name);
+ if (*chain != NULL) {
+ current_chain = *chain;
+ return HE_OK;
+ }
+ return HE_CHAIN_NOT_EXISTENT;
+}
+
+
+/* End of chain_hash_* functions */
+
+
+
+
+
+/* get previous dt_rules of the internal dt_rule representations of
+ chain_rule 'rule'.
+ if previous chain_rule 'prev' is not a jump rule return pointer to
+ 'prev->dtr' and set 'free_needed' to 0. otherwise a new ptrblock
+ with pointers to the previous dt_rules has to be computed from the
+ 'chain->end' block of the chain 'prev' is pointing to and
+ 'free_needed' is set to 1.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+get_prev_dt_rules(const struct hipac_chain *chain,
+ const struct chain_rule *rule,
+ __u8 *free_needed, struct ptrblock **p)
+{
+ struct chain_rule *prev;
+
+ if (unlikely(CHAIN_NOT_CONNECTED(chain)))
+ return HE_IMPOSSIBLE_CONDITION;
+
+ if (unlikely(rule->head.prev == &chain->head)){
+ *p = chain->start;
+ *free_needed = 0;
+ return HE_OK;
+ }
+
+ prev = list_entry(rule->head.prev, struct chain_rule, head);
+ *free_needed = IS_JUMP_RULE(prev);
+ if (!(*free_needed)){
+ *p = prev->dtr;
+ } else {
+ struct hipac_chain *c = NULL;
+ hipac_error error;
+ __u32 i;
+ chain_hash_lookup((void *) &prev->r
+ + prev->r.target_offset, &c);
+ *p = NULL;
+ for (i = 0; i < c->paths->len; i++){
+ if (prev == P_ELEM_PREV(c->paths, i)){
+ if ((error =
+ ptrblock_append(p, c->end->p[i]))){
+ CHECK_ERROR("ptrblock_append");
+ if (*p)
+ ptrblock_free(*p);
+ *p = NULL;
+ return error;
+ }
+ }
+ }
+ }
+ return HE_OK;
+}
+
+
+
+/* get next dt_rules of the internal dt_rule representations of
+ chain_rule 'rule'.
+ if next chain_rule 'next' is not a jump rule return pointer to
+ 'next->dtr' and set 'free_needed' to 0. otherwise a new ptrblock
+ with pointers to the next dt_rules has to be computed from the
+ 'chain->start' block of the chain 'next' is pointing to and
+ 'free_needed' is set to 1.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+get_next_dt_rules(const struct hipac_chain *chain,
+ const struct chain_rule *rule,
+ __u8 *free_needed, struct ptrblock **p)
+{
+ struct chain_rule *next;
+
+ if (unlikely(CHAIN_NOT_CONNECTED(chain)))
+ return HE_IMPOSSIBLE_CONDITION;
+
+ if (unlikely(rule->head.next == &chain->head)){
+ *p = chain->end;
+ *free_needed = 0;
+ return HE_OK;
+ }
+
+ next = list_entry(rule->head.next, struct chain_rule, head);
+ *free_needed = IS_JUMP_RULE(next);
+ if (!(*free_needed)){
+ *p = next->dtr;
+ } else {
+ struct hipac_chain *c = NULL;
+ hipac_error error;
+ __u32 i;
+ chain_hash_lookup((void *) &next->r +
+ next->r.target_offset, &c);
+ *p = NULL;
+ for (i = 0; i < c->paths->len; i++){
+ if (next == P_ELEM_PREV(c->paths, i)){
+ if ((error =
+ ptrblock_append(p, c->start->p[i]))){
+ CHECK_ERROR("ptrblock_append");
+ if (*p)
+ ptrblock_free(*p);
+ *p = NULL;
+ return error;
+ }
+ }
+ }
+ }
+ return HE_OK;
+}
+
+
+
+
+
+/*
+ * chain_* functions
+ */
+
+
+/* create new hipac_chain with name 'name' and initialize all fields
+ in struct hipac_chain 'result'. 'list_pos' is used to initialize
+ the list_pos member of 'result'
+ hipac_chain 'result' is not inserted into 'chain_hash'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+chain_new(const char *name, struct hipac_chain **result, __u32 list_pos)
+{
+ struct hipac_chain *chain;
+ hipac_error error;
+
+ if (unlikely(!name || !result))
+ ARG_ERR;
+
+ if (unlikely(!(error = chain_hash_lookup(name, &chain))))
+ return HE_CHAIN_EXISTS;
+
+ *result = chain = hp_alloc(sizeof(*chain), ADD);
+ if (!chain)
+ LOW_MEM("chain alloc failed!");
+ INIT_LIST_HEAD(&chain->head);
+ strncpy(chain->name, name, HIPAC_CHAIN_NAME_MAX_LEN);
+ chain->name[HIPAC_CHAIN_NAME_MAX_LEN - 1] = '\0';
+ chain->list_pos = list_pos;
+ chain->ref_count = 0;
+ chain->next_chains = NULL;
+ chain->paths = NULL;
+ chain->start = NULL;
+ chain->end = NULL;
+ chain->dimtree = NULL;
+ return HE_OK;
+}
+
+
+
+/* free hipac_chain 'chain' and recursively all other data
+ structures that are pointed to from within this struct.
+ also free all rules in this chain.
+ attention: make sure 'chain' is NOT in the global
+ 'chain_hash' anymore! */
+static inline void
+chain_free(struct hipac_chain* chain)
+{
+ struct list_head *lh;
+ struct chain_rule *rule;
+
+ if (unlikely(!chain)){
+ ARG_MSG;
+ return;
+ }
+
+ lh = chain->head.next;
+ while (lh != &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ lh = lh->next;
+ list_del(lh->prev);
+ chain_rule_destroy(rule);
+ }
+ if (chain->next_chains)
+ strblock_free(chain->next_chains);
+ if (chain->paths)
+ paths_free(chain->paths);
+ if (chain->start)
+ ptrblock_free(chain->start);
+ if (chain->end)
+ ptrblock_free(chain->end);
+ hp_free(chain);
+}
+
+
+
+/* flush hipac_chain 'chain'
+ free all rules in this chain and all other data structures
+ that are pointed to from within this struct. */
+static inline void
+chain_flush(struct hipac_chain* chain)
+{
+ struct list_head *lh;
+ struct chain_rule *rule;
+
+ if (unlikely(!chain)){
+ ARG_MSG;
+ return;
+ }
+
+ lh = chain->head.next;
+ while (lh != &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ lh = lh->next;
+ list_del(lh->prev);
+ chain_rule_destroy(rule);
+ }
+ if (chain->next_chains){
+ strblock_free(chain->next_chains);
+ chain->next_chains = NULL;
+ }
+ if (chain->paths){
+ paths_free(chain->paths);
+ chain->paths = NULL;
+ }
+ if (chain->start){
+ ptrblock_free(chain->start);
+ chain->start = NULL;
+ }
+ if (chain->end){
+ ptrblock_free(chain->end);
+ chain->end = NULL;
+ }
+ chain->ref_count = 0;
+
+}
+
+
+
+/* insert chain_rule 'rule' into 'chain' at position rule->r.pos;
+ if chain is empty, rule->r.pos is set to 1;
+ if rule->r.pos is larger than maxpos, rule->r.pos is set to maxpos;
+ 'do_inc': when not 0 the pos field of all rules with
+ pos >= rule->r.pos is incremented by 1 */
+static inline void
+chain_insert(struct hipac_chain* chain, struct chain_rule *rule,
+ const __u8 do_inc)
+{
+ struct list_head *lh;
+ __u32 rulepos;
+ struct chain_rule *curule;
+
+ if (unlikely(!chain || !rule)){
+ ARG_MSG;
+ return;
+ }
+
+ if (list_empty(&chain->head)) {
+ list_add(&rule->head, &chain->head);
+ rule->r.pos = 1;
+ return;
+ }
+
+ if (rule->r.pos == 0)
+ rule->r.pos = 1;
+
+ lh = chain->head.prev;
+ rulepos = rule->r.pos;
+ curule = list_entry(lh, struct chain_rule, head);
+
+ if (rulepos > curule->r.pos) {
+ list_add_tail(&rule->head, &chain->head);
+ rule->r.pos = curule->r.pos + 1;
+ return;
+ }
+
+ if (do_inc) {
+ do {
+ curule->r.pos++;
+ lh = lh->prev;
+ curule = list_entry(lh, struct chain_rule, head);
+ } while (lh != &chain->head && curule->r.pos >= rulepos);
+ } else {
+ do {
+ lh = lh->prev;
+ curule = list_entry(lh, struct chain_rule, head);
+ } while (lh != &chain->head && curule->r.pos >= rulepos);
+ }
+
+ if (lh == &chain->head) {
+ assert(rulepos == 1);
+ assert(!do_inc ||
+ list_entry(chain->head.next,
+ struct chain_rule, head)->r.pos == 2);
+ assert(do_inc ||
+ list_entry(chain->head.next,
+ struct chain_rule, head)->r.pos == 1);
+
+ list_add(&rule->head, &chain->head);
+ } else {
+ assert(curule->r.pos < rulepos);
+ assert(!do_inc ||
+ list_entry(curule->head.next,
+ struct chain_rule,
+ head)->r.pos == rulepos + 1);
+ assert(do_inc ||
+ list_entry(curule->head.next,
+ struct chain_rule,
+ head)->r.pos == rulepos);
+
+ list_add(&rule->head, &curule->head);
+ }
+}
+
+
+
+/* delete all rules in 'chain' with position == 'rulepos';
+ attention: you must NOT call chain_delete with an empty chain!
+ does not free the rules! */
+static void
+chain_delete(const struct hipac_chain* chain, const __u32 rulepos)
+{
+ struct chain_rule *current_rule;
+
+ if (unlikely(!chain)){
+ ARG_MSG;
+ return;
+ }
+ current_rule = list_entry(chain->head.prev, struct chain_rule, head);
+
+ while (current_rule->r.pos > rulepos) {
+ current_rule->r.pos--;
+ current_rule = list_entry(current_rule->head.prev,
+ struct chain_rule, head);
+ }
+ list_del(&current_rule->head);
+}
+
+
+
+/* find rule in hipac_chain 'chain' that equals hipac_rule 'rule'.
+ possible errors: HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+chain_find_rule(const struct hipac_chain *chain, const struct hipac_rule *rule,
+ struct chain_rule **result)
+{
+ struct list_head *lh;
+ struct chain_rule *currule;
+
+ if (!chain || !rule || !result)
+ ARG_ERR;
+
+ list_for_each(lh, &chain->head) {
+ currule = list_entry(lh, struct chain_rule, head);
+ if (eq_fn(rule, &currule->r)){
+ *result = currule;
+ return HE_OK;
+ }
+ }
+ return HE_RULE_NOT_EXISTENT;
+}
+
+
+
+/* find rule in hipac_chain 'chain' with position 'pos'
+ possible errors: HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+chain_find_rule_with_pos(const struct hipac_chain *chain, const __u32 pos,
+ struct chain_rule **result)
+{
+ struct list_head *lh;
+ struct chain_rule *currule;
+
+ if (!chain || !result)
+ ARG_ERR;
+
+ list_for_each(lh, &chain->head) {
+ currule = list_entry(lh, struct chain_rule, head);
+ if (currule->r.pos == pos){
+ *result = currule;
+ return HE_OK;
+ }
+ }
+ return HE_RULE_NOT_EXISTENT;
+}
+
+
+/* End of chain_* functions */
+
+
+
+
+
+
+/* build chain_rule 'result' from hipac_rule 'rule'.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+build_chain_rule_from_hipac_rule(const struct hipac_rule *rule,
+ struct chain_rule **result)
+{
+ if (unlikely(!rule || !result))
+ ARG_ERR;
+
+ *result = hp_alloc(sizeof(**result) - sizeof(struct hipac_rule)
+ + rule->size, ADD);
+ if (!(*result))
+ LOW_MEM("chain_rule alloc failed!");
+
+ (*result)->dtr = NULL;
+ copy_fn(rule, &(*result)->r);
+ return HE_OK;
+}
+
+
+
+/* build hipac_rule 'result' from dt_rule 'dt_rule'.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+build_hipac_rule_from_dt_rule(const struct dt_rule *dt_rule,
+ struct hipac_rule **result)
+{
+ __u32 size, exec_match_size = 0;
+ __u32 i;
+
+ if (unlikely(!dt_rule || !result))
+ ARG_ERR;
+
+ size = sizeof(**result)
+ + dt_rule->dt_match_len * sizeof(struct hipac_match)
+ + dt_rule->exec_target_size;
+
+ if (dt_rule->exec_match){
+ for (i = 0; i < dt_rule->exec_match->len; i += 2){
+ exec_match_size += (void *)
+ dt_rule->exec_match->p[i + 1]
+ - dt_rule->exec_match->p[i];
+ }
+ }
+ size += exec_match_size;
+
+ *result = hp_alloc(size, ADD);
+ if (!(*result))
+ LOW_MEM("hipac_rule alloc failed!");
+
+ (*result)->pos = dt_rule->spec.pos;
+ (*result)->size = size;
+ (*result)->origin = 0;
+ (*result)->action = dt_rule->spec.action;
+ (*result)->native_mct = dt_rule->dt_match_len;
+ if (dt_rule->exec_match)
+ (*result)->match_offset = sizeof(**result)
+ + dt_rule->dt_match_len * sizeof(struct hipac_match);
+ else (*result)->match_offset = 0;
+ (*result)->target_offset = sizeof(**result)
+ + dt_rule->dt_match_len * sizeof(struct hipac_match)
+ + exec_match_size;
+
+ for (i = 0; i < dt_rule->dt_match_len; i++){
+ (*result)->first_match[i].dimid =
+ dt_rule->first_dt_match[i].dimid;
+ (*result)->first_match[i].invert = 0;
+ (*result)->first_match[i].left =
+ dt_rule->first_dt_match[i].left;
+ (*result)->first_match[i].right =
+ dt_rule->first_dt_match[i].right;
+ }
+ if (dt_rule->exec_match){
+ void *pos = (void *) (*result) + (*result)->match_offset;
+ for (i = 0; i < dt_rule->exec_match->len; i += 2){
+ size = dt_rule->exec_match->p[i + 1]
+ - dt_rule->exec_match->p[i];
+ memcpy(pos, dt_rule->exec_match->p[i], size);
+ pos += size;
+ }
+ }
+ if (dt_rule->exec_target_size){
+ memcpy((void *) (*result) + (*result)->target_offset,
+ dt_rule->exec_target, dt_rule->exec_target_size);
+ }
+ return HE_OK;
+}
+
+
+
+/* if hipac_rule 'r' contains exec_matches, add a pointer to the beginning
+ and a pointer to the end of that exec_matches to the ptrblock '*p'
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+add_exec_matches(struct ptrblock **p, const struct hipac_rule *r)
+{
+ hipac_error error;
+
+ if (unlikely(!p || !r))
+ ARG_ERR;
+
+ if (r->match_offset == 0)
+ return HE_OK;
+
+ if ((error = ptrblock_append(p, (void *) r + r->match_offset))){
+ CHECK_ERROR("ptrblock_append");
+ return error;
+ }
+ if ((error = ptrblock_append(p, (void *) r + r->target_offset))){
+ CHECK_ERROR("ptrblock_append");
+ ptrblock_delete_tail(p);
+ return error;
+ }
+ return HE_OK;
+}
+
+
+
+/* build new dt_rule from prefix_rule and/or hipac_rule.
+ prefix_rule and/or hipac_rule can be NULL.
+ pos: the position of the new dt_rule; is written to result->spec.pos
+ action: the action of the new dt_rule; is written to result->spec.action
+ the exec_matches from prefix and hipac_rule are merged into
+ result->exec_match.
+ if the hipac_rule contains an exec_target it is written to
+ result->exec_target.
+ attention: does NOT copy the native matches, this must be done externally!
+ allocs space for prefix->native_mct + rule->native_mct matches!
+ when merging the native matches externally, remember to do a
+ 'hipac_realloc' when prefix and rule contain the same dimids!
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+build_dt_rule(struct prefix_rule *prefix, const struct hipac_rule *rule,
+ const __u32 pos, const __u32 action, struct dt_rule **result)
+{
+ hipac_error error;
+ struct dt_rule *new_dt_rule;
+ __u8 mct = 0;
+
+ if (unlikely(!result))
+ ARG_ERR;
+
+ if (prefix)
+ mct += prefix->native_mct;
+ if (rule)
+ mct += rule->native_mct;
+
+ new_dt_rule = hp_alloc(dt_rule_size(mct), ADD);
+ if (!new_dt_rule)
+ LOW_MEM("dt_rule alloc failed!");
+ new_dt_rule->spec.rlp = 0;
+ new_dt_rule->spec.rtype = RT_RULE;
+ new_dt_rule->spec.action = action;
+ new_dt_rule->spec.pos = pos;
+ new_dt_rule->exec_match = NULL;
+ new_dt_rule->exec_target = NULL;
+ new_dt_rule->exec_target_size = 0;
+ new_dt_rule->deleted = 0;
+
+ if (prefix){
+ if ((error = ptrblock_clone(prefix->exec_matches,
+ &new_dt_rule->exec_match))){
+ dt_rule_free(new_dt_rule);
+ CHECK_ERROR("ptrblock_clone");
+ return error;
+ }
+ }
+ if (rule){
+ if ((error = add_exec_matches(&new_dt_rule->exec_match,
+ rule))){
+ dt_rule_free(new_dt_rule);
+ CHECK_ERROR("add_exec_matches");
+ return error;
+ }
+ }
+ if (action == TARGET_EXEC){
+ new_dt_rule->exec_target = (void *) rule + rule->target_offset;
+ new_dt_rule->exec_target_size = ((void *) rule + rule->size)
+ - ((void *) rule + rule->target_offset);
+ }
+ new_dt_rule->dt_match_len = mct;
+ *result = new_dt_rule;
+ return HE_OK;
+}
+
+
+
+/* Remove last element from strblock 'paths' and also free the data
+ structures that are pointed to from within this element */
+static inline void
+paths_delete_tail(struct strblock **paths)
+{
+ struct prefix_rule *p = P_ELEM_RULE(*paths, (*paths)->len - 1);
+ if (p)
+ prefix_rule_free(p);
+ strblock_delete_tail(paths);
+}
+
+
+
+/* Remove element with position 'pos' from strblock 'paths' and also free
+ the data structures that are pointed to from within this element. */
+static inline void
+paths_delete_pos(struct strblock **paths, __u32 pos)
+{
+ struct prefix_rule *p = P_ELEM_RULE(*paths, pos);
+ if (p)
+ prefix_rule_free(p);
+ strblock_delete_pos(paths, pos);
+}
+
+
+/* count number of negations/inverted matches in hipac_match array */
+static inline __u8
+count_inv_matches(const struct hipac_match *first_match, const __u8 match_cnt)
+{
+ __u8 i, result = 0;
+ for (i = 0; i < match_cnt; i++)
+ if (first_match[i].invert)
+ result++;
+ return result;
+}
+
+
+
+/* count number of negations/inverted matches in both rules, but
+ without counting matches in the same dimid twice */
+static inline __u8
+count_inv_matches_2(const struct hipac_rule *hipac_rule,
+ const struct prefix_rule *prefix_rule)
+{
+ __u8 i, j, result = 0;
+
+ for (i = 0, j = 0; i < prefix_rule->native_mct; i++){
+ while ((j < hipac_rule->native_mct)
+ && (hipac_rule->first_match[j].dimid
+ < prefix_rule->first_match[i].dimid)){
+ if (hipac_rule->first_match[j].invert)
+ result++;
+ j++;
+ }
+ if ((j < hipac_rule->native_mct)
+ && (hipac_rule->first_match[j].dimid
+ == prefix_rule->first_match[i].dimid)){
+ if (hipac_rule->first_match[j].invert)
+ result++;
+ j++;
+ continue;
+ }
+ if (prefix_rule->first_match[i].invert)
+ result++;
+ }
+ while (j < hipac_rule->native_mct){
+ if (hipac_rule->first_match[j].invert)
+ result++;
+ j++;
+ }
+ return result;
+}
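+
+/* Example (illustrative only): prefix matches on dimids {1 (inverted), 4},
+ * hipac_rule matches on dimids {1, 3 (inverted)}.  The shared dimid 1 is
+ * counted once, using the hipac_rule's invert flag (here 0), dimid 3
+ * contributes 1 and dimid 4 contributes 0, so the result is 1. */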
+
+
+
+/* merge hipac_match 's' into dt_match 'new' while keeping negation
+ in mind. */
+static inline void
+merge_dimension(struct hipac_match *s, struct dt_match *new,
+ __u32 inv, __u16 *inv_match, __u8 *not_valid)
+{
+ if (!(s->invert)){
+ new->dimid = s->dimid;
+ new->left = s->left;
+ new->right = s->right;
+ return;
+ }
+ if (inv & (1 << *inv_match)){
+ if (s->right <
+ MAXKEY(dim2btype[s->dimid])){
+ new->dimid = s->dimid;
+ new->left = s->right + 1;
+ new->right = MAXKEY(dim2btype[s->dimid]);
+ (*inv_match)++;
+ } else {
+ *not_valid = 1;
+ }
+ } else {
+ if (s->left){
+ new->dimid = s->dimid;
+ new->left = 0;
+ new->right = s->left - 1;
+ (*inv_match)++;
+ } else {
+ *not_valid = 1;
+ }
+ }
+}
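+
+/* Example (illustrative only): an inverted match on [80, 90] in a 16 bit
+ * dimension (MAXKEY == 65535) is rewritten as one of its two complement
+ * halves, selected by the bit of 'inv' indexed by *inv_match:
+ *     bit set   -> [91, 65535]
+ *     bit clear -> [0, 79]
+ * if the selected half is empty (right == MAXKEY resp. left == 0), the
+ * whole rule copy is discarded by setting *not_valid. */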
+
+
+
+/* insert new dt_rule(s) at position 'pos' into dimtree 'path->dimtree'.
+ the new dt_rule is created from information found in 'path->rule'
+ and 'rule'. if 'path->rule' or 'rule' contain negation solve this by
+ adding several new dt_rules to the dimtree. append the (first) new
+ dt_rule to the 'rule->dtr' pointer block.
+ if commit is not 0 commit the changes.
+ in case of an error undo all changes.
+ attention: in case of an error already inserted rules are not removed
+ from the internal dimtree chain. those rules have to be
+ removed externally.
+ possible errors: HE_LOW_MEMORY, HE_RULE_ORIGIN_MISMATCH,
+ HE_RULE_PREFIX_MISMATCH, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+insert_into_dt(const struct path_ *path,
+ struct chain_rule *rule,
+ const __u32 pos, const __u8 commit)
+{
+ struct dt_rule *new_dt_rule;
+ hipac_error error;
+ __u32 i, j, inv;
+ __u8 first = 1;
+ __u8 num;
+ struct dt_match *new;
+ __u32 mct = 0;
+
+ if (unlikely(!path || !path->rule || !rule))
+ ARG_ERR;
+
+ num = count_inv_matches_2(&rule->r, path->rule);
+
+ mct = rule->r.native_mct + path->rule->native_mct;
+
+ if (!(num)){
+ __u32 new_mct = 0;
+ struct hipac_match *p = path->rule->first_match;
+ struct hipac_match *r = rule->r.first_match;
+
+
+ if ((error = build_dt_rule(path->rule, &rule->r, pos,
+ rule->r.action, &new_dt_rule))){
+ CHECK_ERROR("build_dt_rule");
+ return error;
+ }
+
+ new = new_dt_rule->first_dt_match;
+
+ for (i = 0, j = 0; i < path->rule->native_mct; i++){
+ while ((j < rule->r.native_mct)
+ && (r[j].dimid < p[i].dimid)){
+ new[new_mct].dimid = r[j].dimid;
+ new[new_mct].left = r[j].left;
+ new[new_mct].right = r[j].right;
+ j++;
+ new_mct++;
+
+ }
+ if ((j < rule->r.native_mct)
+ && (r[j].dimid == p[i].dimid)){
+ if (p[i].invert){
+ if (!(r[j].right < p[i].left
+ || r[j].left > p[i].right)){
+ dt_rule_free(new_dt_rule);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ } else if (r[j].left < p[i].left
+ || r[j].right > p[i].right){
+ dt_rule_free(new_dt_rule);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ new[new_mct].dimid = r[j].dimid;
+ new[new_mct].left = r[j].left;
+ new[new_mct].right = r[j].right;
+ j++;
+ new_mct++;
+ continue;
+ }
+ new[new_mct].dimid = p[i].dimid;
+ new[new_mct].left = p[i].left;
+ new[new_mct].right = p[i].right;
+ new_mct++;
+ }
+
+ while (j < rule->r.native_mct){
+ new[new_mct].dimid = r[j].dimid;
+ new[new_mct].left = r[j].left;
+ new[new_mct].right = r[j].right;
+ j++;
+ new_mct++;
+ }
+
+ if (new_mct < mct){
+ new_dt_rule->dt_match_len = new_mct;
+ new_dt_rule = hp_realloc(new_dt_rule,
+ dt_rule_size(new_mct));
+ if (!new_dt_rule){
+ dt_rule_free(new_dt_rule);
+ IMPOSSIBLE_CONDITION("new_dt_rule is NULL");
+ }
+ }
+
+ if ((error = ptrblock_append(&rule->dtr,
+ (void *) new_dt_rule))){
+ CHECK_ERROR("ptrblock_append");
+ dt_rule_free(new_dt_rule);
+ return error;
+ }
+ if ((error = dimtree_insert(path->dimtree, new_dt_rule,
+ rule->r.origin, INC, commit))){
+ CHECK_ERROR("dimtree_insert");
+ return error;
+ }
+ return HE_OK;
+ }
+ //else we have a rule containing negation
+
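+ /* Example (illustrative only): with 'num' inverted matches the loop
+  * below enumerates inv = 0 .. 2^num - 1; bit k of 'inv' selects which
+  * complement half merge_dimension() uses for the k-th inverted
+  * dimension.  With two inverted intervals [l1,r1] and [l2,r2]:
+  *     inv = 00 -> [0,l1-1]       x [0,l2-1]
+  *     inv = 01 -> [r1+1,MAXKEY]  x [0,l2-1]
+  *     inv = 10 -> [0,l1-1]       x [r2+1,MAXKEY]
+  *     inv = 11 -> [r1+1,MAXKEY]  x [r2+1,MAXKEY]
+  * combinations with an empty half are skipped via 'not_valid', so at
+  * most 2^num dt_rules are inserted for this one hipac_rule. */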
+ for (inv = 0; inv < (1 << num); inv++){
+ __u16 j;
+ __u8 not_valid = 0;
+ __u16 inv_match = 0;
+ __u32 new_mct = 0;
+ struct hipac_match *p = path->rule->first_match;
+ struct hipac_match *r = rule->r.first_match;
+
+ if ((error = build_dt_rule(path->rule, &rule->r, pos,
+ rule->r.action, &new_dt_rule))){
+ CHECK_ERROR("build_dt_rule");
+ if (!first)
+ dimtree_failed(native_dts);
+ return error;
+ }
+
+ new = new_dt_rule->first_dt_match;
+
+ for (i = 0, j = 0; i < path->rule->native_mct; i++){
+ while ((j < rule->r.native_mct)
+ && (r[j].dimid < p[i].dimid)){
+ merge_dimension(&r[j], &new[new_mct], inv,
+ &inv_match, ¬_valid);
+ if (not_valid)
+ break;
+ j++;
+ new_mct++;
+ }
+ if (not_valid)
+ break;
+ if ((j < rule->r.native_mct)
+ && (r[j].dimid == p[i].dimid)){
+ if (!r[j].invert && !p[i].invert){
+ if (r[j].left < p[i].left
+ || r[j].right > p[i].right){
+ dt_rule_free(new_dt_rule);
+ if (!first)
+ dimtree_failed(
+ native_dts);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ } else if (r[j].invert && !p[i].invert){
+ dt_rule_free(new_dt_rule);
+ if (!first)
+ dimtree_failed(native_dts);
+ return HE_RULE_PREFIX_MISMATCH;
+ } else if (!r[j].invert && p[i].invert){
+ if (!(r[j].right < p[i].left
+ || r[j].left > p[i].right)){
+ dt_rule_free(new_dt_rule);
+ if (!first)
+ dimtree_failed(
+ native_dts);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ } else if(r[j].invert && p[i].invert){
+ if (r[j].left > p[i].left
+ || r[j].right < p[i].right){
+ dt_rule_free(new_dt_rule);
+ if (!first)
+ dimtree_failed(
+ native_dts);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ }
+
+ merge_dimension(&r[j], &new[new_mct], inv,
+ &inv_match, ¬_valid);
+ if (not_valid)
+ break;
+ j++;
+ new_mct++;
+ continue;
+
+ }
+ merge_dimension(&p[i], &new[new_mct], inv,
+ &inv_match, ¬_valid);
+ if (not_valid)
+ break;
+ new_mct++;
+ }
+ if (not_valid){
+ dt_rule_free(new_dt_rule);
+ continue;
+ }
+ while (j < rule->r.native_mct){
+ merge_dimension(&r[j], &new[new_mct], inv,
+ &inv_match, ¬_valid);
+ if (not_valid)
+ break;
+ j++;
+ new_mct++;
+ }
+ if (not_valid){
+ dt_rule_free(new_dt_rule);
+ continue;
+ }
+
+ if (new_mct < mct){
+ new_dt_rule->dt_match_len = new_mct;
+ new_dt_rule = hp_realloc(new_dt_rule,
+ dt_rule_size(new_mct));
+ if (!new_dt_rule){
+ dt_rule_free(new_dt_rule);
+ IMPOSSIBLE_CONDITION("new_dt_rule is NULL");
+ }
+ }
+
+ if (first){
+ if ((error = ptrblock_append(&rule->dtr,
+ (void *) new_dt_rule))){
+ CHECK_ERROR("ptrblock_append");
+ dt_rule_free(new_dt_rule);
+ return error;
+ }
+ }
+ if ((error = dimtree_insert(path->dimtree, new_dt_rule,
+ rule->r.origin, first,
+ DONT_COMMIT))){
+ CHECK_ERROR("dimtree_insert");
+ return error;
+ }
+ if (first)
+ first = 0;
+ }
+ if (commit)
+ dimtree_commit(native_dts);
+ return HE_OK;
+}
+
+
+
+/* detect loop in hipac_chains.
+ if any rule in hipac_chain 'chain' (or recursively in any other
+ hipac_chain any rule in 'chain' jumps to) jumps to hipac_chain 'org'
+ a loop is detected.
+ possible errors: HE_LOOP_DETECTED, HE_REC_LIMIT */
+hipac_error
+detect_loop(const struct hipac_chain *chain,
+ const struct hipac_chain *org, __u32 depth)
+{
+ if (unlikely(!chain || !org))
+ ARG_ERR;
+
+ if (depth > HIPAC_REC_LIMIT)
+ return HE_REC_LIMIT;
+
+ if (chain->next_chains){
+ __u32 i;
+ hipac_error error;
+ struct hipac_chain *next;
+ for (i = 0; i < chain->next_chains->len; i++){
+ next = STRBLOCK_ITH(chain->next_chains, i,
+ struct next_chain_elem *)->chain;
+ if (next == org)
+ return HE_LOOP_DETECTED;
+ if ((error = detect_loop(next, org, depth + 1)))
+ return error;
+ }
+ }
+ return HE_OK;
+}
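+
+/* Example (illustrative only): assume chain A jumps to B and B jumps to C.
+ * When insert() is asked to add a jump rule from a not yet connected
+ * chain C to A, it calls detect_loop(A, C, 1), which walks A's
+ * next_chains ({B}) and then B's next_chains ({C}); since C is the
+ * originating chain, HE_LOOP_DETECTED is returned and the rule is
+ * rejected.  Recursion deeper than HIPAC_REC_LIMIT ends in HE_REC_LIMIT. */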
+
+
+
+/* add new path to the paths block of hipac_chain 'chain'.
+ the new path is computed from the path 'path' and the chain_rule 'rule'.
+ possible errors: HE_LOW_MEMORY, HE_RULE_PREFIX_MISMATCH,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+add_path(struct hipac_chain *chain, const struct path_ *path,
+ struct chain_rule *rule)
+{
+ hipac_error error;
+ struct path_ *new_path;
+ struct prefix_rule *new_prefix;
+ struct hipac_match *r, *p, *new;
+ __u8 mct, i, j = 0, new_mct = 0;
+
+ if (!chain || !path || !path->rule || !rule)
+ ARG_ERR;
+
+ mct = rule->r.native_mct + path->rule->native_mct;
+
+ new_prefix = hp_alloc(sizeof(*new_prefix)
+ + mct * sizeof(struct hipac_match), ADD);
+ if (!new_prefix){
+ LOW_MEM("new_prefix alloc failed!");
+ }
+ new_path = hp_alloc(sizeof(*new_path), ADD);
+ if (!new_path){
+ hp_free(new_prefix);
+ LOW_MEM("new_path alloc failed!");
+ }
+
+ new_path->dimtree = path->dimtree;
+ new_path->prev = rule;
+ new_path->rule = new_prefix;
+
+ new_prefix->origin = path->rule->origin & rule->r.origin;
+ new_prefix->exec_matches = NULL;
+ if ((error = ptrblock_clone(path->rule->exec_matches,
+ &new_prefix->exec_matches))){
+ CHECK_ERROR("ptrblock_clone");
+ path_free(new_path);
+ return error;
+ }
+ if ((error = add_exec_matches(&new_prefix->exec_matches,
+ &rule->r))){
+ CHECK_ERROR("add_exec_matches");
+ path_free(new_path);
+ return error;
+ }
+ r = rule->r.first_match;
+ p = path->rule->first_match;
+ new = new_prefix->first_match;
+
+ for (i = 0; i < path->rule->native_mct; i++){
+ while ((j < rule->r.native_mct)
+ && (r[j].dimid < p[i].dimid)){
+ new[new_mct].dimid = r[j].dimid;
+ new[new_mct].invert = r[j].invert;
+ new[new_mct].left = r[j].left;
+ new[new_mct].right = r[j].right;
+ j++;
+ new_mct++;
+ }
+ if ((j < rule->r.native_mct)
+ && (r[j].dimid == p[i].dimid)){
+ if (!r[j].invert && !p[i].invert){
+ if (r[j].left < p[i].left
+ || r[j].right > p[i].right){
+ path_free(new_path);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ } else if (r[j].invert && !p[i].invert){
+ path_free(new_path);
+ return HE_RULE_PREFIX_MISMATCH;
+ } else if (!r[j].invert && p[i].invert){
+ if (!(r[j].right < p[i].left
+ || r[j].left > p[i].right)){
+ path_free(new_path);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ } else if(r[j].invert && p[i].invert){
+ if (r[j].left > p[i].left
+ || r[j].right < p[i].right){
+ path_free(new_path);
+ return HE_RULE_PREFIX_MISMATCH;
+ }
+ }
+
+ new[new_mct].dimid = r[j].dimid;
+ new[new_mct].invert = r[j].invert;
+ new[new_mct].left = r[j].left;
+ new[new_mct].right = r[j].right;
+ j++;
+ new_mct++;
+ continue;
+ }
+ new[new_mct].dimid = p[i].dimid;
+ new[new_mct].invert = p[i].invert;
+ new[new_mct].left = p[i].left;
+ new[new_mct].right = p[i].right;
+ new_mct++;
+ }
+
+ while (j < rule->r.native_mct){
+ new[new_mct].dimid = r[j].dimid;
+ new[new_mct].invert = r[j].invert;
+ new[new_mct].left = r[j].left;
+ new[new_mct].right = r[j].right;
+ j++;
+ new_mct++;
+ }
+
+ if (new_mct < mct){
+ new_prefix = hp_realloc(new_prefix, sizeof(*new_prefix)
+ + new_mct
+ * sizeof(struct hipac_match));
+ if (!new_prefix){
+ path_free(new_path);
+ IMPOSSIBLE_CONDITION("new_prefix is NULL");
+ }
+ new_path->rule = new_prefix;
+ }
+
+ new_prefix->native_mct = new_mct;
+
+ if ((error = strblock_append_check(&chain->paths, new_path,
+ sizeof(*new_path)))){
+ CHECK_ERROR("strblock_append");
+ path_free(new_path);
+ return error;
+ }
+ hp_free(new_path);
+ return HE_OK;
+
+}
+
+
+
+/* add a dt_rule marking the beginning of the hipac_chain 'chain'
+ in the internal dimtree chain to 'path->dimtree' and add a pointer
+ to that new dt_rule to the 'chain->start' ptrblock.
+ the dt_rule is added with TARGET_DUMMY, so that it is not inserted
+ into the internal dimtree but only into the internal dimtree chain.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+add_chain_start(struct hipac_chain *chain, const struct path_ *path,
+ const __u32 pos)
+{
+ hipac_error error;
+ struct dt_rule *start;
+
+ if ((error = build_dt_rule(NULL, NULL, pos,
+ TARGET_DUMMY, &start))){
+ CHECK_ERROR("build_dt_rule");
+ return error;
+ }
+ if ((error = ptrblock_append(&chain->start, start))){
+ CHECK_ERROR("ptrblock_append");
+ dt_rule_free(start);
+ return error;
+ }
+ if ((error = dimtree_insert(path->dimtree, start,
+ ORIGIN_ALL, INC, DONT_COMMIT))){
+ CHECK_ERROR("dimtree_insert");
+ ptrblock_delete_tail(&chain->start);
+ dt_rule_free(start);
+ return error;
+ }
+ return HE_OK;
+}
+
+
+
+/* add a dt_rule marking the end of the hipac_chain 'chain'
+ in the internal dimtree chain to 'path->dimtree' and add a pointer
+ to that new dt_rule to the 'chain->end' ptrblock.
+ the dt_rule added to the internal dimtree corresponds to 'path->rule'.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+add_chain_end(struct hipac_chain *chain, struct path_ *path,
+ const __u32 pos)
+{
+ struct dt_rule *new_dt_rule;
+ hipac_error error;
+ __u32 i;
+ __u8 first = 1;
+ __u8 num;
+ struct hipac_match *old;
+ struct dt_match *new;
+
+
+ num = count_inv_matches((struct hipac_match *) path->rule->first_match,
+ path->rule->native_mct);
+
+ if (!(num)){
+ if ((error = build_dt_rule(path->rule, NULL, pos,
+ TARGET_NONE, &new_dt_rule))){
+ CHECK_ERROR("build_dt_rule");
+ return error;
+ }
+ for (i = 0; i < path->rule->native_mct; i++){
+ new_dt_rule->first_dt_match[i].dimid =
+ path->rule->first_match[i].dimid;
+ new_dt_rule->first_dt_match[i].left =
+ path->rule->first_match[i].left;
+ new_dt_rule->first_dt_match[i].right =
+ path->rule->first_match[i].right;
+ }
+ if ((error = ptrblock_append(&chain->end,
+ (void *) new_dt_rule))){
+ CHECK_ERROR("ptrblock_append");
+ dt_rule_free(new_dt_rule);
+ return error;
+ }
+ if ((error = dimtree_insert(path->dimtree, new_dt_rule,
+ ORIGIN_ALL, INC, DONT_COMMIT))){
+ CHECK_ERROR("dimtree_insert");
+ return error;
+ }
+ return HE_OK;
+ }
+ //else we have a rule containing negation
+
+ for (i = 0; i < (1 << num); i++){
+ __u16 j;
+ __u8 not_valid = 0;
+ __u16 inv_match = 0;
+
+
+ if ((error = build_dt_rule(path->rule, NULL, pos,
+ TARGET_NONE, &new_dt_rule))){
+ CHECK_ERROR("build_dt_rule");
+ if (!first)
+ dimtree_failed(native_dts);
+ return error;
+ }
+ old = path->rule->first_match;
+ new = new_dt_rule->first_dt_match;
+ for (j = 0; j < path->rule->native_mct; j++){
+ if (!(old[j].invert)){
+ new[j].dimid = old[j].dimid;
+ new[j].left = old[j].left;
+ new[j].right = old[j].right;
+ continue;
+ }
+ if (i & (1 << inv_match)){
+ if (old[j].right <
+ MAXKEY(dim2btype[old[j].dimid])){
+ new[j].dimid = old[j].dimid;
+ new[j].left = old[j].right + 1;
+ new[j].right =
+ MAXKEY(dim2btype[new[j].dimid]);
+ } else {
+ not_valid = 1;
+ break;
+ }
+ } else {
+ if (old[j].left){
+ new[j].dimid = old[j].dimid;
+ new[j].left = 0;
+ new[j].right = old[j].left - 1;
+ } else {
+ not_valid = 1;
+ break;
+ }
+ }
+ inv_match++;
+ }
+ if (not_valid){
+ dt_rule_free(new_dt_rule);
+ continue;
+ }
+ if (first){
+ if ((error = ptrblock_append(&chain->end,
+ (void *) new_dt_rule))){
+ CHECK_ERROR("ptrblock_append");
+ dt_rule_free(new_dt_rule);
+ return error;
+ }
+ }
+ if ((error = dimtree_insert(path->dimtree, new_dt_rule,
+ ORIGIN_ALL, first, DONT_COMMIT))){
+ CHECK_ERROR("dimtree_insert");
+ return error;
+ }
+ if (first)
+ first = 0;
+ }
+ return HE_OK;
+}
+
+
+
+/* add hipac_chain 'to' to the next_chain block of hipac_chain 'from'.
+ if 'from' already contains a reference to hipac_chain 'to' then the
+ corresponding count field is incremented by 1, otherwise a new
+ next_chain_elem with its count field set to 1 is added to the
+ next_chain block.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+add_next_chain(struct hipac_chain *from, struct hipac_chain *to)
+{
+ hipac_error error;
+ struct next_chain_elem *nc;
+
+ if (from->next_chains){
+ __u32 i;
+ for (i = 0; i < from->next_chains->len; i++){
+ nc = STRBLOCK_ITH(from->next_chains, i,
+ struct next_chain_elem *);
+ if (nc->chain == to){
+ nc->count++;
+ return HE_OK;
+ }
+ }
+ }
+
+ nc = hp_alloc(sizeof(*nc), ADD);
+ if (!nc)
+ LOW_MEM("next_chain alloc failed!");
+ nc->count = 1;
+ nc->chain = to;
+ error = strblock_append_check(&from->next_chains, nc, sizeof(*nc));
+ hp_free(nc);
+ CHECK_ERROR("strblock_append");
+ return error;
+}
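+
+/* Example (illustrative only): if chain A already holds two jump rules to
+ * chain B, A's next_chains block contains a single next_chain_elem
+ * {chain = B, count = 2}; a third jump merely increments the count, and
+ * delete_next_chain() below removes the element only when the last
+ * reference is dropped. */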
+
+
+
+/* remove one reference to hipac_chain 'to' from the next_chain block
+ of hipac_chain 'from'. */
+static inline void
+delete_next_chain(struct hipac_chain *from, const struct hipac_chain *to)
+{
+ struct next_chain_elem *nc;
+
+ if (from->next_chains){
+ __u32 i;
+ for (i = 0; i < from->next_chains->len; i++){
+ nc = STRBLOCK_ITH(from->next_chains, i,
+ struct next_chain_elem *);
+ if (nc->chain == to){
+ if (nc->count > 1){
+ nc->count--;
+ } else {
+ strblock_delete_pos(&from->next_chains,
+ i);
+ }
+ break;
+ }
+ }
+ }
+}
+
+
+
+/* recursively insert jump rule 'rule' into hipac data structures
+ and dimtrees. in case of an error changes must be undone
+ externally via delete_jump_from_hipac_layer(),
+ delete_dt_rules_from_dt_chains() and dimtree_chain_fix().
+ attention: in case of success it does NOT commit the changes.
+ don't forget to eventually commit the modifications
+ externally via dimtree_commit().
+ possible errors: HE_LOW_MEMORY, HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+insert_jump_rec(const struct hipac_chain *org, const struct ptrblock *next,
+ const struct path_ *path, const __u32 ins_num,
+ struct chain_rule *rule, __u32 depth)
+{
+ hipac_error error;
+ struct list_head *lh;
+ struct chain_rule *currule;
+ struct path_ *new_path;
+ struct hipac_chain *chain = NULL;
+ __u32 i;
+
+ if (depth > HIPAC_REC_LIMIT)
+ return HE_REC_LIMIT;
+
+ chain_hash_lookup((void *) &rule->r + rule->r.target_offset, &chain);
+
+ if (org == chain)
+ return HE_LOOP_DETECTED;
+
+ for (i = 0; i < ins_num; i++){
+ if ((error = add_path(chain, path + i, rule))){
+ CHECK_ERROR("add_path");
+ for (; i > 0; i--){
+ paths_delete_tail(&chain->paths);
+ ptrblock_delete_tail(&chain->start);
+ }
+ return error;
+ }
+ if ((error = add_chain_start(chain,
+ P_ELEM(chain->paths,
+ chain->paths->len - 1),
+ ((struct dt_rule *)
+ next->p[i])->spec.pos))){
+ CHECK_ERROR("add_chain_start");
+ paths_delete_tail(&chain->paths);
+ for (; i > 0; i--){
+ paths_delete_tail(&chain->paths);
+ ptrblock_delete_tail(&chain->start);
+ }
+ return error;
+ }
+ }
+
+ new_path = P_ELEM(chain->paths, chain->paths->len - ins_num);
+
+ list_for_each(lh, &chain->head){
+ currule = list_entry(lh, struct chain_rule, head);
+ if (IS_JUMP_RULE(currule)){
+ if ((error = insert_jump_rec(org, next,
+ new_path, ins_num,
+ currule, depth + 1))){
+ CHECK_ERROR("insert_jump_rec");
+ return error;
+ }
+ } else for (i = 0; i < ins_num; i++){
+ if ((error = insert_into_dt(new_path + i, currule,
+ ((struct dt_rule *)
+ next->p[i])->spec.pos,
+ DONT_COMMIT))){
+ CHECK_ERROR("insert_into_dt");
+ return error;
+ }
+ }
+ }
+ for (i = 0; i < ins_num; i++){
+ if ((error = add_chain_end(chain, new_path + i,
+ ((struct dt_rule *)
+ next->p[i])->spec.pos))){
+ CHECK_ERROR("add_chain_end");
+ return error;
+ }
+ }
+
+ return HE_OK;
+}
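+
+/* Example (illustrative only): a jump rule in a native chain to user
+ * chain U1, where U1 itself contains a jump to U2, is flattened
+ * recursively: for every path leading to the jump position a chain start
+ * marker is added, U1's non-jump rules are inserted via insert_into_dt()
+ * at the position of the following dt_rule in 'next', the jump to U2
+ * triggers another insert_jump_rec() level, and add_chain_end() closes
+ * U1's section. */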
+
+
+
+/* delete all entries in the hipac layer data structures corresponding to
+ jump rule 'rule'. all entries in hipac_chain path, start and end blocks
+ pointing to dt_rules with positions > prev and < next are deleted.
+ attention: be sure that those rules have been deleted from the dimtrees
+ before and that those changes have been committed. there must NOT
+ be any interval in any dimtree anymore pointing to one of those
+ rules! BUT the corresponding dt_rules must NOT yet have been
+ deleted from the internal dimtree chains! */
+static void
+delete_jump_from_hipac_layer(const struct hipac_chain *org,
+ const struct ptrblock *prev,
+ const struct ptrblock *next,
+ const struct chain_rule *rule)
+{
+ struct list_head *lh;
+ struct hipac_chain *chain = NULL;
+ struct chain_rule *currule;
+ __u32 i, j , finished = 0, del_num = 0;
+
+ chain_hash_lookup((void *) &rule->r + rule->r.target_offset,
+ &chain);
+
+ if (!chain->start)
+ return;
+
+ for (i = chain->start->len; i > 0; i--){
+ for (j = 0; j < prev->len; j++){
+ if (!chain->paths){
+ finished = 1;
+ break;
+ }
+ if ((P_ELEM_DIMTREE(chain->paths, i - 1)
+ == P_ELEM_DIMTREE(org->paths, j))
+ && (((struct dt_rule *)
+ chain->start->p[i - 1])->spec.pos
+ > ((struct dt_rule *) prev->p[j])->spec.pos)
+ && (((struct dt_rule *)
+ chain->start->p[i - 1])->spec.pos
+ < ((struct dt_rule *) next->p[j])->spec.pos)){
+
+ chain->start->p[i - 1] = NULL;
+ paths_delete_pos(&chain->paths, i - 1);
+ del_num++;
+ break;
+ }
+ }
+ if (finished)
+ break;
+ }
+
+ if (!del_num)
+ return;
+
+ ptrblock_delete_multi(&chain->end, chain->start);
+
+ list_for_each(lh, &chain->head){
+ currule = list_entry(lh,
+ struct chain_rule, head);
+ if (IS_JUMP_RULE(currule)){
+ delete_jump_from_hipac_layer(org, prev, next, currule);
+ } else {
+ if (!currule->dtr)
+ break;
+ if (chain->end
+ && chain->end->len == currule->dtr->len)
+ break;
+ ptrblock_delete_multi(&currule->dtr, chain->start);
+ }
+ }
+
+ for (i = chain->start->len; i > 0; i--){
+ if (!chain->start->p[i - 1])
+ ptrblock_delete_pos(&chain->start, i - 1);
+ }
+}
+
+
+
+/* delete all dt_rules between prev and next from the internal dimtrees.
+ all rules with positions > prev and < next are deleted.
+ in case of an error undo all made changes.
+ attention: does NOT commit the changes. don't forget to eventually commit
+ the modifications externally via dimtree_commit().
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+delete_dt_rules_from_dimtrees(const struct hipac_chain *chain,
+ const struct ptrblock *prev,
+ const struct ptrblock *next)
+{
+ hipac_error error;
+ __u32 i;
+ struct dt_rule *rule;
+
+ if (!chain || !prev || !next)
+ ARG_ERR;
+
+ for (i = 0; i < prev->len; i++){
+ rule = list_entry(((struct dt_rule *) prev->p[i])->head.next,
+ struct dt_rule, head);
+
+ while (rule->spec.pos ==
+ ((struct dt_rule *) prev->p[i])->spec.pos){
+ rule = list_entry(rule->head.next,
+ struct dt_rule, head);
+ }
+ while (rule != ((struct dt_rule *) next->p[i])){
+ if ((error = dimtree_delete(P_ELEM_DIMTREE(chain->paths,
+ i),
+ rule, DONT_COMMIT))){
+ CHECK_ERROR("dimtree_delete");
+ return error;
+ }
+ rule = list_entry(rule->head.next,
+ struct dt_rule, head);
+ }
+ }
+ return HE_OK;
+}
+
+
+
+/* delete all dt_rules between prev and next from the internal dimtree chains.
+ all rules with positions > prev and < next are deleted.
+ attention: be sure that those rules have been deleted from the dimtrees
+ before and that those changes have been committed. there must NOT
+ be any interval in any dimtree anymore pointing to one of those
+ rules! */
+static inline void
+delete_dt_rules_from_dt_chains(const struct hipac_chain *chain,
+ const struct ptrblock *prev,
+ const struct ptrblock *next)
+{
+ __u32 i, end_pos;
+ struct dt_rule *start;
+
+ if (!chain || !prev || !next)
+ ARG_MSG;
+
+ for (i = 0; i < prev->len; i++){
+ end_pos = ((struct dt_rule *) next->p[i])->spec.pos - 1;
+ if (((struct dt_rule *) prev->p[i])->spec.pos == end_pos){
+ continue;
+ }
+ start = list_entry(((struct dt_rule *) prev->p[i])->head.next,
+ struct dt_rule, head);
+ while (start->spec.pos ==
+ ((struct dt_rule *) prev->p[i])->spec.pos){
+ start = list_entry(start->head.next,
+ struct dt_rule, head);
+ }
+ dimtree_chain_delete(P_ELEM_DIMTREE(chain->paths, i), start,
+ end_pos);
+ }
+}
+
+
+
+/* insert chain_rule 'rule' into hipac_chain 'chain' and
+ commit the changes. in case of an error undo all made changes.
+ possible errors: HE_LOW_MEMORY, HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+insert(struct hipac_chain *chain, struct chain_rule *rule)
+{
+ hipac_error error;
+ struct ptrblock *prev, *next;
+ __u8 prev_free, next_free;
+
+ if (CHAIN_NOT_CONNECTED(chain)){
+ if (IS_JUMP_RULE(rule)){
+ struct hipac_chain *target_chain;
+ if ((error = chain_hash_lookup((void *) &rule->r
+ + rule->r.target_offset,
+ &target_chain))){
+ chain_rule_free(rule);
+ return HE_TARGET_CHAIN_NOT_EXISTENT;
+ }
+ if (target_chain == chain){
+ chain_rule_free(rule);
+ return HE_LOOP_DETECTED;
+ }
+ if (IS_ROOT_CHAIN(target_chain)){
+ chain_rule_free(rule);
+ return HE_TARGET_CHAIN_IS_NATIVE;
+ }
+ if ((error = detect_loop(target_chain, chain, 1))){
+ chain_rule_free(rule);
+ return error;
+ }
+ if ((error = add_next_chain(chain, target_chain))){
+ chain_rule_free(rule);
+ return error;
+ }
+ target_chain->ref_count++;
+ }
+ chain_insert(chain, rule, INC);
+ return HE_OK;
+ }
+
+ chain_insert(chain, rule, INC);
+ if ((error = get_prev_dt_rules(chain, rule, &prev_free, &prev))){
+ CHECK_ERROR("get_prev_dt_rules");
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ return error;
+ }
+ if ((error = get_next_dt_rules(chain, rule, &next_free, &next))){
+ CHECK_ERROR("get_next_dt_rules");
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ return error;
+ }
+
+
+ if (likely(IS_NOT_JUMP_RULE(rule))){
+ __u32 i;
+ __u8 commit = DONT_COMMIT;
+ if (next->len == 1)
+ commit = COMMIT;
+ for (i = 0; i < next->len; i++){
+ if ((error =
+ insert_into_dt(P_ELEM(chain->paths, i), rule,
+ ((struct dt_rule *)
+ next->p[i])->spec.pos, commit))){
+ CHECK_ERROR("insert_into_dt");
+ dimtree_failed(native_dts);
+ delete_dt_rules_from_dt_chains(chain,
+ prev, next);
+ dimtree_chain_fix(native_dts);
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return error;
+ }
+ }
+ if (!commit)
+ dimtree_commit(native_dts);
+ } else {
+ struct hipac_chain *target_chain;
+ if ((error = chain_hash_lookup((void *) &rule->r
+ + rule->r.target_offset,
+ &target_chain))){
+ CHECK_ERROR("chain_hash_lookup");
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return HE_TARGET_CHAIN_NOT_EXISTENT;
+ }
+ if (target_chain == chain){
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return HE_LOOP_DETECTED;
+ }
+ if (IS_ROOT_CHAIN(target_chain)){
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return HE_TARGET_CHAIN_IS_NATIVE;
+ }
+ if ((error = add_next_chain(chain, target_chain))){
+ CHECK_ERROR("add_next_chain");
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return error;
+ }
+ if ((error = insert_jump_rec(chain, next,
+ P_ELEM(chain->paths, 0),
+ chain->paths->len, rule, 1))){
+ CHECK_ERROR("insert_jump_rec");
+ dimtree_failed(native_dts);
+ delete_jump_from_hipac_layer(chain, prev, next, rule);
+ delete_dt_rules_from_dt_chains(chain, prev, next);
+ dimtree_chain_fix(native_dts);
+ delete_next_chain(chain, target_chain);
+ chain_delete(chain, rule->r.pos);
+ chain_rule_free(rule);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return error;
+ }
+ dimtree_commit(native_dts);
+ target_chain->ref_count++;
+ }
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return HE_OK;
+}
+
+
+
+/* delete chain_rule 'rule' from hipac_chain 'chain' and commit
+ the changes. all representations of that rule in the internal
+ dimtrees are removed.
+ in case of an error undo all made changes.
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+delete(struct hipac_chain* chain, struct chain_rule* rule)
+{
+ hipac_error error;
+ __u8 inv;
+
+ if (unlikely(CHAIN_NOT_CONNECTED(chain))){
+ if (unlikely(IS_JUMP_RULE(rule))){
+ struct hipac_chain *target_chain = NULL;
+ chain_hash_lookup((void *) &rule->r
+ + rule->r.target_offset,
+ &target_chain);
+ delete_next_chain(chain, target_chain);
+ target_chain->ref_count--;
+ }
+ chain_delete(chain, rule->r.pos);
+ chain_rule_destroy(rule);
+ return HE_OK;
+ }
+
+ inv = count_inv_matches(rule->r.first_match,
+ rule->r.native_mct);
+
+ if (likely(!inv && IS_NOT_JUMP_RULE(rule))){
+ __u32 i;
+ __u8 commit = 0;
+ if (rule->dtr->len == 1){
+ commit = 1;
+ }
+ for (i = 0; i < rule->dtr->len; i++){
+ if ((error =
+ dimtree_delete(P_ELEM_DIMTREE(chain->paths, i),
+ (struct dt_rule *) rule->dtr->p[i],
+ commit))){
+ CHECK_ERROR("dimtree_delete");
+ if (!commit)
+ dimtree_failed(native_dts);
+ return error;
+ }
+ }
+ if (!commit)
+ dimtree_commit(native_dts);
+ for (i = 0; i < rule->dtr->len; i++){
+ dimtree_chain_delete(P_ELEM_DIMTREE(chain->paths, i),
+ (struct dt_rule *) rule->dtr->p[i],
+ ((struct dt_rule *)
+ rule->dtr->p[i])->spec.pos);
+ }
+ } else {
+ struct ptrblock *prev, *next;
+ __u8 prev_free, next_free;
+
+ if ((error = get_prev_dt_rules(chain, rule,
+ &prev_free, &prev))){
+ CHECK_ERROR("get_prev_dt_rules");
+ return error;
+ }
+ if ((error = get_next_dt_rules(chain, rule,
+ &next_free, &next))){
+ CHECK_ERROR("get_next_dt_rules");
+ if (prev_free)
+ ptrblock_free(prev);
+ return error;
+ }
+ if ((error = delete_dt_rules_from_dimtrees(chain,
+ prev, next))){
+ CHECK_ERROR("delete_dt_rules_from_dimtrees");
+ dimtree_failed(native_dts);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ return error;
+ }
+ dimtree_commit(native_dts);
+ if (unlikely(IS_JUMP_RULE(rule))){
+ struct hipac_chain *target_chain = NULL;
+ chain_hash_lookup((void *) &rule->r + rule->r.target_offset,
+ &target_chain);
+ delete_next_chain(chain, target_chain);
+ target_chain->ref_count--;
+ delete_jump_from_hipac_layer(chain, prev, next, rule);
+ }
+ delete_dt_rules_from_dt_chains(chain, prev, next);
+ if (prev_free)
+ ptrblock_free(prev);
+ if (next_free)
+ ptrblock_free(next);
+ }
+ dimtree_chain_fix(native_dts);
+ chain_delete(chain, rule->r.pos);
+ chain_rule_destroy(rule);
+ return HE_OK;
+}
+
+
+
+/* replace chain_rule 'old_rule' in hipac_chain 'chain' with
+ chain_rule 'new_rule' and commit the changes.
+ in case of an error undo all made changes.
+ possible errors: HE_LOW_MEMORY, HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+static inline hipac_error
+replace(struct hipac_chain *chain, struct chain_rule *old_rule,
+ struct chain_rule *new_rule)
+{
+ hipac_error error;
+ struct ptrblock *prev_old, *prev_new, *next_old, *next_new;
+ __u8 prev_free_old, prev_free_new, next_free_old, next_free_new;
+ struct hipac_chain *target_chain = NULL;
+
+ if (CHAIN_NOT_CONNECTED(chain)){
+ if (IS_JUMP_RULE(new_rule)){
+ if ((error =
+ chain_hash_lookup((void *) &new_rule->r
+ + new_rule->r.target_offset,
+ &target_chain))){
+ chain_rule_free(new_rule);
+ return HE_TARGET_CHAIN_NOT_EXISTENT;
+ }
+ if (target_chain == chain){
+ chain_rule_free(new_rule);
+ return HE_LOOP_DETECTED;
+ }
+ if (IS_ROOT_CHAIN(target_chain)){
+ chain_rule_free(new_rule);
+ return HE_TARGET_CHAIN_IS_NATIVE;
+ }
+ if ((error = detect_loop(target_chain, chain, 1))){
+ chain_rule_free(new_rule);
+ return error;
+ }
+ if ((error = add_next_chain(chain, target_chain))){
+ chain_rule_free(new_rule);
+ return error;
+ }
+ target_chain->ref_count++;
+ }
+ if (IS_JUMP_RULE(old_rule)){
+ chain_hash_lookup((void *) &old_rule->r
+ + old_rule->r.target_offset,
+ &target_chain);
+ delete_next_chain(chain, target_chain);
+ target_chain->ref_count--;
+ }
+ chain_delete(chain, old_rule->r.pos);
+ chain_rule_destroy(old_rule);
+ chain_insert(chain, new_rule, INC);
+ return HE_OK;
+ }
+
+ if ((error = get_prev_dt_rules(chain, old_rule,
+ &prev_free_new, &prev_new))){
+ CHECK_ERROR("get_prev_dt_rules");
+ chain_rule_free(new_rule);
+ return error;
+ }
+ if ((error = get_next_dt_rules(chain, old_rule,
+ &next_free_old, &next_old))){
+ CHECK_ERROR("get_next_dt_rules");
+ chain_rule_free(new_rule);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ return error;
+ }
+ if ((error = delete_dt_rules_from_dimtrees(chain,
+ prev_new, next_old))){
+ CHECK_ERROR("delete_dt_rules_from_dimtrees");
+ dimtree_failed(native_dts);
+ chain_rule_free(new_rule);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ return error;
+ }
+
+ chain_insert(chain, new_rule, INC);
+
+ if ((error = get_next_dt_rules(chain, new_rule,
+ &next_free_new, &next_new))){
+ CHECK_ERROR("get_next_dt_rules");
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ dimtree_failed(native_dts);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ return error;
+ }
+
+ if (likely(IS_NOT_JUMP_RULE(new_rule))){
+ __u32 i;
+ for (i = 0; i < next_new->len; i++){
+ if ((error = insert_into_dt(P_ELEM(chain->paths, i),
+ new_rule,
+ ((struct dt_rule *)
+ next_new->p[i])->spec.pos,
+ DONT_COMMIT))){
+ CHECK_ERROR("insert_into_dt");
+ dimtree_failed(native_dts);
+ delete_dt_rules_from_dt_chains(chain,
+ prev_new,
+ next_new);
+ dimtree_chain_fix(native_dts);
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return error;
+ }
+ }
+ if ((error = get_prev_dt_rules(chain, old_rule,
+ &prev_free_old, &prev_old))){
+ CHECK_ERROR("get_prev_dt_rules");
+ dimtree_failed(native_dts);
+ delete_dt_rules_from_dt_chains(chain, prev_new,
+ next_new);
+ dimtree_chain_fix(native_dts);
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return error;
+ }
+ } else {
+ if ((error = chain_hash_lookup((void *) &new_rule->r
+ + new_rule->r.target_offset,
+ &target_chain))){
+ CHECK_ERROR("chain_hash_lookup");
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ dimtree_failed(native_dts);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return HE_TARGET_CHAIN_NOT_EXISTENT;
+ }
+ if (target_chain == chain){
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ dimtree_failed(native_dts);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return HE_LOOP_DETECTED;
+ }
+ if (IS_ROOT_CHAIN(target_chain)){
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ dimtree_failed(native_dts);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return HE_TARGET_CHAIN_IS_NATIVE;
+ }
+ if ((error = add_next_chain(chain, target_chain))){
+ CHECK_ERROR("add_next_chain");
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ dimtree_failed(native_dts);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return error;
+ }
+ if ((error = insert_jump_rec(chain, next_new,
+ P_ELEM(chain->paths, 0),
+ chain->paths->len, new_rule, 1))){
+ CHECK_ERROR("insert_jump_rec");
+ dimtree_failed(native_dts);
+ delete_jump_from_hipac_layer(chain, prev_new,
+ next_new, new_rule);
+ delete_dt_rules_from_dt_chains(chain, prev_new,
+ next_new);
+ dimtree_chain_fix(native_dts);
+ delete_next_chain(chain, target_chain);
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return error;
+ }
+ if ((error = get_prev_dt_rules(chain, old_rule,
+ &prev_free_old, &prev_old))){
+ CHECK_ERROR("get_prev_dt_rules");
+ dimtree_failed(native_dts);
+ delete_jump_from_hipac_layer(chain, prev_new,
+ next_new, new_rule);
+ delete_dt_rules_from_dt_chains(chain, prev_new,
+ next_new);
+ dimtree_chain_fix(native_dts);
+ delete_next_chain(chain, target_chain);
+ chain_delete(chain, new_rule->r.pos);
+ chain_rule_free(new_rule);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return error;
+ }
+ target_chain->ref_count++;
+ }
+ dimtree_commit(native_dts);
+
+ if (likely(IS_JUMP_RULE(old_rule))){
+ chain_hash_lookup((void *) &old_rule->r
+ + old_rule->r.target_offset,
+ &target_chain);
+ delete_next_chain(chain, target_chain);
+ target_chain->ref_count--;
+ delete_jump_from_hipac_layer(chain, prev_old, next_old,
+ old_rule);
+ }
+ delete_dt_rules_from_dt_chains(chain, prev_old, next_old);
+ dimtree_chain_fix(native_dts);
+ chain_delete(chain, old_rule->r.pos);
+ chain_rule_destroy(old_rule);
+ if (prev_free_old)
+ ptrblock_free(prev_old);
+ if (prev_free_new)
+ ptrblock_free(prev_new);
+ if (next_free_old)
+ ptrblock_free(next_old);
+ if (next_free_new)
+ ptrblock_free(next_new);
+ return HE_OK;
+}
+
+
+
+
+
+/*
+ * hipac_* functions
+ */
+
+
+/* init hipac data structures;
+ MUST be called once at the beginning in order to let the other
+ operations work properly!
+ dimid_to_bittype: assigns dimids to bit types.
+ i-th element of the array contains the bit type
+ of dimension id i
+ extract: functions to extract certain fields from a packet.
+ the function at position i of the array returns
+ the entry in a packet that corresponds to
+ dimension id i (e.g. the source ip of the packet)
+ len: length of the dim2btype and extract array
+ copycon: constructor function
+ destroy: destructor function
+ match: match executor function
+ target: target executor function
+ eq: equality function to compare rules
+ maxmem: maximum allowed memory consumption
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_init(const __u8 dimid_to_bittype[], const hipac_extract_t extract[],
+ const __u8 len, hipac_copy_constructor_t copycon,
+ hipac_destroy_exec_t destroy, hipac_match_exec_t match,
+ hipac_target_exec_t target, hipac_eq_exec_t eq,
+ const __u64 maxmem)
+{
+
+ if (unlikely(!dimid_to_bittype || !extract || !copycon || !destroy ||
+ !match || !target || !eq ))
+ ARG_ERR;
+
+ mem_max = maxmem;
+ d2blen = len;
+ current_chain = NULL;
+ chain_hash = NULL;
+ native_dts = NULL;
+ dim2btype = hp_alloc(len, ADD);
+ if (!dim2btype)
+ LOW_MEM("dim2btype alloc failed!");
+ extract_fn = hp_alloc(len * sizeof(void *), ADD);
+ if (!extract_fn){
+ hp_free(dim2btype);
+ LOW_MEM("extract_fn alloc failed!");
+ }
+ chain_hash = ihash_new(CHAIN_HASH_LEN, ADD, CHAIN_HASH_AVR_BUCKET,
+ ihash_func_str, eq_str);
+ if (!chain_hash){
+ hp_free(dim2btype);
+ hp_free(extract_fn);
+ LOW_MEM("ihash_new failed!");
+ }
+ memcpy(dim2btype, dimid_to_bittype, len);
+ memcpy(extract_fn, extract, len * sizeof(void *));
+ copy_fn = copycon;
+ destroy_fn = destroy;
+ match_fn = match;
+ target_fn = target;
+ eq_fn = eq;
+ return HE_OK;
+}
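+
+/* Hypothetical caller sketch (all identifiers below are placeholders and
+ * not part of this patch): a frontend initializes the library once before
+ * any other hipac_* call, roughly like
+ *
+ *     static __u8 d2b[DIM_COUNT];              // bit type per dimid
+ *     static hipac_extract_t ext[DIM_COUNT];   // field extractor per dimid
+ *
+ *     if (hipac_init(d2b, ext, DIM_COUNT, my_copycon, my_destroy,
+ *                    my_match, my_target, my_eq,
+ *                    16 * 1024 * 1024) != HE_OK)
+ *             goto err;
+ *
+ * and calls hipac_exit() exactly once on shutdown. */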
+
+
+
+/* free all hipac data structures;
+ MUST be called once in the end
+ attention: make sure there are no external accesses to hipac
+ data structures taking place anymore! */
+void
+hipac_exit(void)
+{
+ if (native_dts){
+ __u8 i;
+ for(i = 0; i < native_dts->len; i++){
+ dimtree_free((struct dimtree*) native_dts->p[i]);
+ }
+ ptrblock_free(native_dts);
+ }
+ hp_free(dim2btype);
+ hp_free(extract_fn);
+ IHASH_VAL_ITERATE(chain_hash, struct hipac_chain *, chain_free);
+ ihash_free(chain_hash);
+ hp_mem_exit();
+}
+
+
+
+/* return new hipac data structure
+ name: name of the public chain
+ name_intern: name of the internal dimtree chain
+ policy: initial policy
+ origin: bitvector unique to this data structure
+ hipac: pointer to a pointer to the resulting hipac data
+ structure. use as first argument to hipac_match()
+ possible errors: HE_LOW_MEMORY, HE_NATIVE_CHAIN_EXISTS,
+ HE_CHAIN_EXISTS, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_new(const char *name, const char* name_intern, const __u8 policy,
+ const __u32 origin, void **hipac)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct dt_rule *start, *end;
+ struct prefix_rule *prefix_rule;
+ struct path_ *new_path;
+ __u32 i, j, list_pos = 0;
+
+ if (unlikely(!name || !name_intern || !hipac))
+ ARG_ERR;
+
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ struct hipac_chain *c;
+ c = chain_hash->bucket[i]->kv[j].val;
+ if (c->dimtree && list_pos <= c->list_pos) {
+ list_pos = c->list_pos + 1;
+ }
+ }
+ }
+
+ if (native_dts){
+ __u32 i = 0;
+ for (i = 0; i < native_dts->len; i++)
+ if (!strcmp(((struct dimtree *)native_dts->p[i])
+ ->chain->name, name_intern))
+ return HE_NATIVE_CHAIN_EXISTS;
+ }
+
+ if ((error = chain_new(name, &chain, list_pos))){
+ CHECK_ERROR("chain_new");
+ return error;
+ }
+
+ if ((error = build_dt_rule(NULL, NULL, 0, TARGET_DUMMY, &start))){
+ CHECK_ERROR("build_dt_rule");
+ chain_free(chain);
+ return error;
+ }
+
+ if ((error = ptrblock_append(&chain->start, start))){
+ CHECK_ERROR("ptrblock_append");
+ chain_free(chain);
+ dt_rule_free(start);
+ return error;
+ }
+ if ((error = build_dt_rule(NULL, NULL, 1, policy, &end))){
+ CHECK_ERROR("build_dt_rule");
+ chain_free(chain);
+ dt_rule_free(start);
+ return error;
+ }
+
+ if ((error = ptrblock_append(&chain->end, end))){
+ CHECK_ERROR("ptrblock_append");
+ chain_free(chain);
+ dt_rule_free(start);
+ dt_rule_free(end);
+ return error;
+ }
+ if ((error = dimtree_new((struct dimtree **)hipac,
+ origin, name_intern,
+ start, end))){
+ CHECK_ERROR("dimtree_new");
+ chain_free(chain);
+ dt_rule_free(start);
+ dt_rule_free(end);
+ return error;
+ }
+
+ if ((error = ptrblock_append(&native_dts,
+ *(struct dimtree**) hipac))){
+ CHECK_ERROR("ptrblock_append");
+ dimtree_free(*(struct dimtree**) hipac);
+ chain_free(chain);
+ return error;
+ }
+
+ prefix_rule = hp_alloc(sizeof(*prefix_rule), ADD);
+ if (!prefix_rule){
+ dimtree_free(*(struct dimtree**) hipac);
+ chain_free(chain);
+ ptrblock_delete_tail(&native_dts);
+ LOW_MEM("prefix rule alloc failed");
+ }
+ new_path = hp_alloc(sizeof(*new_path), ADD);
+ if (!new_path){
+ hp_free(prefix_rule);
+ dimtree_free(*(struct dimtree**) hipac);
+ chain_free(chain);
+ ptrblock_delete_tail(&native_dts);
+ LOW_MEM("new_path alloc failed");
+ }
+ new_path->dimtree = *(struct dimtree**) hipac;
+ new_path->prev = NULL;
+ new_path->rule = prefix_rule;
+ prefix_rule->origin = ORIGIN_ALL;
+ prefix_rule->exec_matches = NULL;
+ prefix_rule->native_mct = 0;
+ if ((error = strblock_append_check(&chain->paths, new_path,
+ sizeof(*new_path)))){
+ CHECK_ERROR("strblock_append");
+ path_free(new_path);
+ dimtree_free(*(struct dimtree**) hipac);
+ chain_free(chain);
+ ptrblock_delete_tail(&native_dts);
+ return error;
+ }
+ hp_free(new_path);
+
+ if ((error = chain_hash_insert(chain))){
+ CHECK_ERROR("chain_hash_insert");
+ chain_free(chain);
+ dimtree_free(*(struct dimtree**) hipac);
+ ptrblock_delete_tail(&native_dts);
+ return error;
+ }
+ chain->dimtree = *(struct dimtree**) hipac;
+ return HE_OK;
+}
+
+
+
+/* set maximum amount of memory the hipac data structures are
+ allowed to occupy. return HE_LOW_MEMORY if 'mem' is lower than
+ currently allocated memory
+ possible errors: HE_LOW_MEMORY */
+hipac_error
+hipac_set_maxmem(const __u64 mem)
+{
+ if (mem_current_real > mem){
+ LOW_MEM();
+ }
+ mem_max = mem;
+ return HE_OK;
+}
+
+
+
+/* get maximum amount of memory the hipac data structures are
+ allowed to occupy. */
+__u64
+hipac_get_maxmem(void)
+{
+ return mem_max;
+}
+
+
+
+/* set policy of chain with name 'name' to 'policy'.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_USERDEFINED,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_set_policy(const char *name, const __u8 policy)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+
+ if (unlikely(!name))
+ ARG_ERR;
+ if ((error = chain_hash_lookup(name, &chain))){
+ CHECK_ERROR("chain_hash_lookup");
+ return error;
+ }
+ if (!chain->dimtree)
+ return HE_CHAIN_IS_USERDEFINED;
+ ((struct dt_rule *)(chain->end->p[0]))->spec.action = policy;
+ return HE_OK;
+}
+
+
+
+/* get policy of chain with name 'name' and write it to 'result'.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_USERDEFINED,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_policy(const char *name, __u8 *result)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+
+ if (unlikely(!name || !result))
+ ARG_ERR;
+ if ((error = chain_hash_lookup(name, &chain))){
+ CHECK_ERROR("chain_hash_lookup");
+ return error;
+ }
+ if (!chain->dimtree)
+ return HE_CHAIN_IS_USERDEFINED;
+ *result = ((struct dt_rule *)(chain->end->p[0]))->spec.action;
+ return HE_OK;
+}
+
+
+
+/* create new user-defined chain with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_new_chain(const char* name)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ __u32 i, j, list_pos;
+
+ if (unlikely(!name))
+ ARG_ERR;
+
+ list_pos = chain_hash->elem_ct - (native_dts ? native_dts->len : 0);
+ if ((error = chain_new(name, &chain, list_pos))){
+ CHECK_ERROR("chain_new");
+ return error;
+ }
+ if ((error = chain_hash_insert(chain))){
+ CHECK_ERROR("chain_hash_insert");
+ chain_free(chain);
+ return error;
+ }
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ struct hipac_chain *c;
+ c = chain_hash->bucket[i]->kv[j].val;
+ if (c->dimtree) {
+ continue;
+ }
+ if (strcmp(c->name, name) > 0) {
+ if (c->list_pos < list_pos) {
+ list_pos = c->list_pos;
+ }
+ c->list_pos++;
+ }
+ }
+ }
+ chain->list_pos = list_pos;
+
+ return HE_OK;
+}
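+
+/* Example (illustrative only): with existing user-defined chains
+ * "alpha" (list_pos 0), "gamma" (1) and "zulu" (2), creating "beta"
+ * assigns it the smallest list_pos of all chains sorting after it
+ * (here 1) and shifts "gamma" and "zulu" to 2 and 3, so list_pos keeps
+ * the user-defined chains in alphabetical order for listing. */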
+
+
+
+/* delete all rules in chain with name 'name'.
+ if 'name' is NULL all rules in all chains are deleted.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_flush_chain(const char *name)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct list_head *lh;
+ struct chain_rule *rule;
+ struct next_chain_elem *n_elem;
+ __u32 i, j;
+
+ if (!name){
+ //flushing all chains
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+ if (chain->dimtree){
+ dimtree_flush(chain->dimtree);
+ lh = chain->head.next;
+ while (lh != &chain->head) {
+ rule = list_entry(
+ lh, struct chain_rule,
+ head);
+ lh = lh->next;
+ list_del(lh->prev);
+ chain_rule_destroy(rule);
+ }
+ if (chain->next_chains){
+ strblock_free(
+ chain->next_chains);
+ chain->next_chains = NULL;
+ }
+ } else {
+ chain_flush(chain);
+ }
+ }
+ }
+ return HE_OK;
+ }
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+
+ if (unlikely(CHAIN_NOT_CONNECTED(chain))){
+ if (chain->next_chains){
+ for (i = 0; i < chain->next_chains->len; i++){
+ n_elem = STRBLOCK_ITH(chain->next_chains, i,
+ struct next_chain_elem *);
+ n_elem->chain->ref_count -= n_elem->count;
+ }
+ strblock_free(chain->next_chains);
+ chain->next_chains = NULL;
+ }
+ lh = chain->head.next;
+ while (lh != &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ lh = lh->next;
+ list_del(lh->prev);
+ chain_rule_destroy(rule);
+ }
+ return HE_OK;
+ }
+
+
+ if (!chain->dimtree){
+ if ((error = delete_dt_rules_from_dimtrees(chain,
+ chain->start,
+ chain->end))){
+ CHECK_ERROR("delete_dt_rules_from_dimtrees");
+ dimtree_failed(native_dts);
+ return error;
+ }
+ dimtree_commit(native_dts);
+ }
+
+ if (chain->next_chains){
+ for (i = 0; i < chain->next_chains->len; i++){
+ n_elem = STRBLOCK_ITH(chain->next_chains, i,
+ struct next_chain_elem *);
+ n_elem->chain->ref_count -= n_elem->count;
+ }
+ strblock_free(chain->next_chains);
+ chain->next_chains = NULL;
+ }
+
+ lh = chain->head.next;
+ while (lh != &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ lh = lh->next;
+ list_del(lh->prev);
+ if (IS_JUMP_RULE(rule)){
+ delete_jump_from_hipac_layer(chain, chain->start,
+ chain->end, rule);
+ }
+ chain_rule_destroy(rule);
+ }
+
+ if (chain->dimtree){
+ dimtree_flush(chain->dimtree);
+ } else {
+ delete_dt_rules_from_dt_chains(chain,
+ chain->start, chain->end);
+ dimtree_chain_fix(native_dts);
+ }
+ return HE_OK;
+}
+
+
+
+/* delete user-defined chain with name 'name'.
+ if 'name' is NULL delete all chains that are empty
+ and not referenced from other chains.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_NATIVE,
+ HE_CHAIN_NOT_EMPTY, HE_CHAIN_IS_REFERENCED */
+hipac_error
+hipac_delete_chain(const char *name)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ __u32 i, j;
+
+ if (!name){
+ //delete all empty and not referenced user-defined chains
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len;) {
+ __u32 k, l;
+ chain = chain_hash->bucket[i]->kv[j].val;
+ if (chain->dimtree
+ || !list_empty(&chain->head)
+ || CHAIN_IS_REFERENCED(chain)) {
+ j++;
+ continue;
+ }
+ chain_hash_remove(chain);
+ for (k = 0; k < chain_hash->len; k++) {
+ if (!chain_hash->bucket[k]) {
+ continue;
+ }
+ for (l = 0; l < chain_hash->
+ bucket[k]->len; l++) {
+ struct hipac_chain *c;
+ c = chain_hash->bucket[k]->
+ kv[l].val;
+ if (!c->dimtree &&
+ c->list_pos >
+ chain->list_pos) {
+ c->list_pos--;
+ }
+ }
+ }
+ chain_free(chain);
+ }
+ }
+ return HE_OK;
+ }
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return HE_CHAIN_NOT_EXISTENT;
+
+ if (chain->dimtree)
+ return HE_CHAIN_IS_NATIVE;
+
+ if (!list_empty(&chain->head))
+ return HE_CHAIN_NOT_EMPTY;
+
+ if (CHAIN_IS_REFERENCED(chain))
+ return HE_CHAIN_IS_REFERENCED;
+
+ chain_hash_remove(chain);
+ for (i = 0; i < chain_hash->len; i++) {
+ struct hipac_chain *c;
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ c = chain_hash->bucket[i]->kv[j].val;
+ if (!c->dimtree && c->list_pos > chain->list_pos) {
+ c->list_pos--;
+ }
+ }
+ }
+ chain_free(chain);
+ return HE_OK;
+}
+
+
+
+/* rename chain with name 'name' to 'new_name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_rename_chain(const char *name, const char *new_name)
+{
+ hipac_error error;
+ struct hipac_chain *old_chain, *new_chain;
+ struct list_head *lh;
+ struct chain_rule *rule;
+ int new_is_larger;
+ char *old;
+ __u32 i, j, k, list_pos;
+
+ if (unlikely(!name || !new_name))
+ ARG_ERR;
+
+ if ((!(error = chain_hash_lookup(new_name, &old_chain))))
+ return HE_CHAIN_EXISTS;
+
+ if ((error = chain_hash_lookup(name, &old_chain)))
+ return error;
+
+ if (old_chain->dimtree)
+ return HE_CHAIN_IS_NATIVE;
+
+ new_chain = hp_alloc(sizeof(*new_chain), ADD);
+ if (!new_chain)
+ return HE_LOW_MEMORY;
+
+ memcpy(new_chain, old_chain, sizeof(*new_chain));
+
+ strncpy(new_chain->name, new_name, HIPAC_CHAIN_NAME_MAX_LEN);
+ new_chain->name[HIPAC_CHAIN_NAME_MAX_LEN - 1] = '\0';
+
+ if ((error = chain_hash_replace(old_chain, new_chain))) {
+ CHECK_ERROR("chain_hash_replace");
+ hp_free(new_chain);
+ return error;
+ }
+ current_chain = NULL;
+
+ if (list_empty(&old_chain->head)) {
+ INIT_LIST_HEAD(&new_chain->head);
+ } else {
+ lh = old_chain->head.next;
+ list_del(&old_chain->head);
+ list_add_tail(&new_chain->head, lh);
+ }
+
+ new_is_larger = (strcmp(new_name, name) > 0);
+ list_pos = old_chain->list_pos;
+ if (!CHAIN_IS_REFERENCED(old_chain)) {
+ for (i = 0; i < chain_hash->len; i++) {
+ struct hipac_chain *chain;
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+ if (chain->dimtree)
+ continue;
+ if (new_is_larger) {
+ if (chain->list_pos >
+ old_chain->list_pos &&
+ strcmp(chain->name,
+ new_name) < 0) {
+ if (list_pos <
+ chain->list_pos) {
+ list_pos = chain->
+ list_pos;
+ }
+ chain->list_pos--;
+ }
+ } else {
+ if (chain->list_pos <
+ old_chain->list_pos &&
+ strcmp(chain->name,
+ new_name) > 0) {
+ if (list_pos >
+ chain->list_pos) {
+ list_pos = chain->
+ list_pos;
+ }
+ chain->list_pos++;
+ }
+ }
+ }
+ }
+ new_chain->list_pos = list_pos;
+ hp_free(old_chain);
+ return HE_OK;
+ }
+
+ for (i = 0; i < chain_hash->len; i++) {
+ struct hipac_chain *chain, **next;
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+
+ if (chain->next_chains){
+ for (k = 0; k < chain->next_chains->len; k++){
+ next = &STRBLOCK_ITH(
+ chain->next_chains, k,
+ struct next_chain_elem *)
+ ->chain;
+ if (*next == old_chain)
+ *next = new_chain;
+ }
+ }
+
+ list_for_each(lh, &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ if (IS_JUMP_RULE(rule)){
+ old = (void *) &rule->r
+ + rule->r.target_offset;
+ if (!strcmp(old, name)){
+ strncpy(old, new_name,
+ HIPAC_CHAIN_NAME_MAX_LEN);
+ old[HIPAC_CHAIN_NAME_MAX_LEN
+ - 1] = '\0';
+ }
+ }
+ }
+
+ if (chain->dimtree)
+ continue;
+
+ if (new_is_larger) {
+ if (chain->list_pos > old_chain->list_pos &&
+ strcmp(chain->name, new_name) < 0) {
+ if (list_pos < chain->list_pos) {
+ list_pos = chain->list_pos;
+ }
+ chain->list_pos--;
+ }
+ } else {
+ if (chain->list_pos < old_chain->list_pos &&
+ strcmp(chain->name, new_name) > 0) {
+ if (list_pos > chain->list_pos) {
+ list_pos = chain->list_pos;
+ }
+ chain->list_pos++;
+ }
+ }
+ }
+ }
+ new_chain->list_pos = list_pos;
+ hp_free(old_chain);
+ return HE_OK;
+}
+
+
+
+/* get an array of hipac_chain_info structs containing required infos
+ for a rule listing of chain with name 'name'. if 'name' is NULL
+ return infos for all chains. 'len' specifies the length of the
+ returned struct hipac_chain_info array.
+ attention: don't forget to free the struct hipac_chain_info array
+ after the rule listing via hipac_free_chain_infos()!
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_chain_infos(const char *name, struct hipac_chain_info **inf,
+ __u32 *len)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+
+ if (unlikely(!inf || !len))
+ ARG_ERR;
+
+ if (!name){
+ __u32 i, j, e;
+ *len = chain_hash->elem_ct;
+ *inf = hp_alloc(*len * sizeof(**inf), ADD);
+ if (!(*inf)){
+ LOW_MEM("hipac_chain_info alloc failed!");
+ }
+ for (i = 0; i < chain_hash->len; i++) {
+ if (!chain_hash->bucket[i])
+ continue;
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+ if (chain->dimtree) {
+ e = chain->list_pos;
+ (*inf)[e].policy = ((struct dt_rule *)
+ (chain->end->p[0]))
+ ->spec.action;
+ } else {
+ e = chain->list_pos +
+ (native_dts ?
+ native_dts->len : 0);
+ (*inf)[e].policy = 0;
+ }
+ (*inf)[e].label = chain->name;
+ (*inf)[e].is_internal_chain = 0;
+ if (list_empty(&chain->head)){
+ (*inf)[e].rule_num = 0;
+ } else {
+ (*inf)[e].rule_num =
+ list_entry(chain->head.prev,
+ struct chain_rule,
+ head)->r.pos;
+ }
+ (*inf)[e].chain_head = &chain->head;
+ }
+ }
+ return HE_OK;
+ }
+
+
+ if ((error = chain_hash_lookup(name, &chain))){
+ // it's not a user-defined chain
+ // check if it's an internal dimtree chain
+ __u32 i;
+ struct dimtree *dt;
+ if (!native_dts)
+ return HE_CHAIN_NOT_EXISTENT;
+ for (i = 0; i < native_dts->len; i++){
+ dt = (struct dimtree *) native_dts->p[i];
+ if (!strcmp(name, dt->chain->name)){
+ *len = 1;
+ *inf = hp_alloc(sizeof(**inf), ADD);
+ if (!(*inf))
+ LOW_MEM();
+ (*inf)[0].label = dt->chain->name;
+ (*inf)[0].policy =
+ list_entry(dt->chain->head.prev,
+ struct dt_rule,
+ head)->spec.action;
+ (*inf)[0].is_internal_chain = 1;
+ (*inf)[0].rule_num = dt->chain->len;
+ (*inf)[0].chain_head = &dt->chain->head;
+ return HE_OK;
+ }
+ }
+ return HE_CHAIN_NOT_EXISTENT;
+ }
+
+ *len = 1;
+ *inf = hp_alloc(sizeof(**inf), ADD);
+ if (!(*inf))
+ LOW_MEM("hipac_chain_info alloc failed!");
+ (*inf)[0].label = chain->name;
+ if (chain->dimtree)
+ (*inf)[0].policy = ((struct dt_rule *)
+ (chain->end->p[0]))->spec.action;
+ else (*inf)[0].policy = 0;
+ (*inf)[0].is_internal_chain = 0;
+ if (list_empty(&chain->head)){
+ (*inf)[0].rule_num = 0;
+ } else {
+ (*inf)[0].rule_num = list_entry(
+ chain->head.prev,
+ struct chain_rule, head)->r.pos;
+ }
+ (*inf)[0].chain_head = &chain->head;
+ return HE_OK;
+}
+
+
+
+/* free array of hipac_chain_info structs that has been allocated
+ before via hipac_get_chain_infos().
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_free_chain_infos(struct hipac_chain_info *inf)
+{
+ if (unlikely(!inf))
+ ARG_ERR;
+ hp_free(inf);
+ return HE_OK;
+}
+
+
+
+/* get next hipac_rule 'next' of previous hipac_rule 'prev'.
+ with this function you can walk over the chain during rule listing.
+   to get the first hipac_rule of a chain, set 'prev' to NULL.
+ when the end of the chain is reached or the chain is empty the
+ hipac_error HE_RULE_NOT_EXISTENT is returned.
+ attention: during rule listing of a chain hipac_get_next_rule()
+ must always be called until finally HE_RULE_NOT_EXISTENT
+ is returned!
+ possible errors: HE_LOW_MEMORY, HE_RULE_NOT_EXISTENT,
+                    HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_next_rule(const struct hipac_chain_info *inf,
+ struct hipac_rule *prev,
+ struct hipac_rule **next)
+{
+ hipac_error error;
+ static struct dt_rule *dt_rule = NULL;
+
+ if (unlikely(!inf || !next))
+ ARG_ERR;
+
+ if (unlikely(!prev)){
+ if (!inf->is_internal_chain){
+ if (unlikely(list_empty(inf->chain_head))){
+ *next = NULL;
+ return HE_RULE_NOT_EXISTENT;
+ } else {
+ *next = &list_entry(inf->chain_head->next,
+ struct chain_rule,
+ head)->r;
+ }
+ } else {
+ if (dt_rule)
+ IMPOSSIBLE_CONDITION("dt_rule is defined!");
+ dt_rule = list_entry(inf->chain_head->next,
+ struct dt_rule, head);
+ if ((error = build_hipac_rule_from_dt_rule(dt_rule,
+ next))){
+ CHECK_ERROR("build_hipac_rule_from_dt_rule");
+ dt_rule = NULL;
+ *next = NULL;
+ return error;
+ }
+ }
+ return HE_OK;
+ }
+
+ if (!inf->is_internal_chain){
+ struct chain_rule *prev_chain_rule;
+ prev_chain_rule = list_entry(prev,
+ struct chain_rule, r);
+ if (prev_chain_rule->head.next == inf->chain_head){
+ *next = NULL;
+ return HE_RULE_NOT_EXISTENT;
+ }
+ *next = &list_entry(prev_chain_rule->head.next,
+ struct chain_rule, head)->r;
+ } else {
+ hp_free(prev);
+ if (!dt_rule)
+ IMPOSSIBLE_CONDITION("dt_rule not defined!");
+ if (dt_rule->head.next == inf->chain_head){
+ dt_rule = NULL;
+ *next = NULL;
+ return HE_RULE_NOT_EXISTENT;
+ }
+ dt_rule = list_entry(dt_rule->head.next,
+ struct dt_rule, head);
+ if ((error = build_hipac_rule_from_dt_rule(dt_rule,
+ next))){
+ CHECK_ERROR("build_hipac_rule_from_dt_rule");
+ dt_rule = NULL;
+ *next = NULL;
+ return error;
+ }
+ }
+ return HE_OK;
+}
+
+
+/* append hipac_rule 'rule' to chain with name 'name'.
+ 'rule->pos' is set to the position of the last rule
+ in the chain + 1.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_append(const char *name, const struct hipac_rule *rule)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct chain_rule *new_rule;
+
+ if (unlikely(!name || !rule))
+ ARG_ERR;
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+ if (unlikely(error = build_chain_rule_from_hipac_rule(rule, &new_rule)))
+ return error;
+
+ new_rule->r.pos = (list_empty(&chain->head)) ?
+ 1 : (list_entry(chain->head.prev,
+ struct chain_rule, head)->r.pos + 1);
+ return insert(chain, new_rule);
+}
+
+
+
+/* insert hipac_rule 'rule' at position 'rule->pos' into chain
+ with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_insert(const char *name, const struct hipac_rule *rule)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct chain_rule *new_rule;
+
+ if (unlikely(!name || !rule))
+ ARG_ERR;
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+ if (unlikely(error = build_chain_rule_from_hipac_rule(rule, &new_rule)))
+ return error;
+
+ return insert(chain, new_rule);
+}
+
+
+
+/* delete hipac_rule with position 'pos' from chain with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_delete_pos(const char *name, const __u32 pos)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct chain_rule *del_rule;
+
+ if (unlikely(!name))
+ ARG_ERR;
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+ if ((error = chain_find_rule_with_pos(chain, pos, &del_rule)))
+ return error;
+
+ return delete(chain, del_rule);
+}
+
+
+
+/* find the first rule in chain with name 'name' that equals
+ hipac_rule 'rule' and delete it.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_delete(const char *name, const struct hipac_rule *rule)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct chain_rule *del_rule;
+
+ if (unlikely(!name || !rule))
+ ARG_ERR;
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+ if ((error = chain_find_rule(chain, rule, &del_rule)))
+ return error;
+
+ return delete(chain, del_rule);
+}
+
+
+
+/* replace rule with position 'rule->pos' in chain with name 'name'
+ with hipac_rule 'rule'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_LOOP_DETECTED,
+ HE_REC_LIMIT, HE_RULE_ORIGIN_MISMATCH,
+ HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_replace(const char *name, const struct hipac_rule *rule)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct chain_rule *del_rule, *new_rule;
+
+ if (unlikely(!name || !rule))
+ ARG_ERR;
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+ if ((error = chain_find_rule_with_pos(chain, rule->pos,
+ &del_rule)))
+ return error;
+
+ if (unlikely(error = build_chain_rule_from_hipac_rule(rule, &new_rule)))
+ return error;
+
+ return replace(chain, del_rule, new_rule);
+}
+
+
+
+
+
+/*
+ * hipac statistic functions
+ */
+
+
+
+/* get hipac chain statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_chain_stat(struct hipac_chain_stat *stat)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct prefix_rule *prefix;
+ struct list_head *lh;
+ struct chain_rule *rule;
+ __u32 i, j, k;
+
+ if (unlikely(!stat))
+ ARG_ERR;
+
+ stat->mem_tight = 0;
+ stat->mem_real = 0;
+ stat->chain_num = chain_hash->elem_ct;
+ stat->rule_num = 0;
+ stat_distribution_init(stat->prefix_stat, 16);
+ stat_distribution_init(stat->incoming_stat, 16);
+ stat_distribution_init(stat->outgoing_stat, 16);
+
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+ if ((error = hp_size(chain,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ if ((error = hp_size(chain->next_chains,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ if ((error = hp_size(chain->paths,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ if (chain->paths){
+ for (k = 0; k < chain->paths->len; k++){
+ prefix = P_ELEM_RULE(chain->paths, k);
+ if ((error =
+ hp_size(prefix,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ if (prefix
+ && (error =
+ hp_size(prefix->exec_matches,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ }
+ }
+ if ((error = hp_size(chain->start,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ if ((error = hp_size(chain->end,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+
+ if (!list_empty(&chain->head)){
+ stat->rule_num +=
+ list_entry(chain->head.prev,
+ struct chain_rule,
+ head)->r.pos;
+ }
+
+ if (chain->paths)
+ stat_distribution_add(stat->prefix_stat, 16,
+ chain->paths->len);
+ else stat_distribution_add(stat->prefix_stat, 16, 0);
+ stat_distribution_add(stat->incoming_stat, 16,
+ chain->ref_count);
+ if (chain->next_chains)
+ stat_distribution_add(stat->outgoing_stat, 16,
+ chain->next_chains->len);
+ else stat_distribution_add(stat->outgoing_stat, 16, 0);
+
+ list_for_each(lh, &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ if ((error = hp_size(rule,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ if ((error = hp_size(rule->dtr,
+ &stat->mem_real,
+ &stat->mem_tight)))
+ return error;
+ }
+ }
+ }
+ return HE_OK;
+}
+
+
+
+/* get hipac rule statistics
+   returned statistics contain all rules of those chains that are
+ reachable from the root chain represented by the 'hipac' pointer.
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_rule_stat(void *hipac, struct hipac_rule_stat *stat)
+{
+ struct hipac_chain *chain;
+ struct list_head *lh;
+ struct chain_rule *rule;
+ __u32 i, j, k, inv;
+ __u8 found;
+
+ if (unlikely(!hipac || !stat))
+ ARG_ERR;
+
+ stat->rule_num = 0;
+ stat->exec_match_num = 0;
+ stat->exec_target_num = 0;
+ stat->jump_target_num = 0;
+ stat->return_target_num = 0;
+ stat_distribution_init(stat->hipac_match_stat, 16);
+ stat_distribution_init(stat->inv_rules_stat, 16);
+
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+ found = 0;
+ if (chain->paths){
+ for (k = 0; k < chain->paths->len; k++){
+ if (hipac ==
+ P_ELEM_DIMTREE(chain->paths, k)){
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (!found)
+ continue;
+ if (!list_empty(&chain->head)){
+ stat->rule_num +=
+ list_entry(chain->head.prev,
+ struct chain_rule,
+ head)->r.pos;
+ }
+
+ list_for_each(lh, &chain->head) {
+ rule = list_entry(lh, struct chain_rule, head);
+ if (rule->r.match_offset)
+ stat->exec_match_num++;
+ if (rule->r.action == TARGET_EXEC)
+ stat->exec_target_num++;
+ if (rule->r.action == TARGET_CHAIN)
+ stat->jump_target_num++;
+ if (rule->r.action == TARGET_RETURN)
+ stat->return_target_num++;
+ stat->hipac_match_stat[rule->r.native_mct]++;
+ inv = count_inv_matches(rule->r.first_match,
+ rule->r.native_mct);
+ stat->inv_rules_stat[inv]++;
+ }
+ }
+ }
+ return HE_OK;
+}
+
+
+
+/* get hipac user statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_user_stat(struct hipac_user_stat *stat)
+{
+ struct hipac_chain *chain;
+ __u32 i, j;
+
+ if (unlikely(!stat))
+ ARG_ERR;
+
+ stat->total_mem_tight = mem_current_tight;
+ stat->total_mem_real = mem_current_real;
+ stat->chain_num = chain_hash->elem_ct;
+ stat->rule_num = 0;
+
+ for (i = 0; i < chain_hash->len; i++) {
+ if (chain_hash->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < chain_hash->bucket[i]->len; j++) {
+ chain = chain_hash->bucket[i]->kv[j].val;
+ if (!list_empty(&chain->head)){
+ stat->rule_num +=
+ list_entry(chain->head.prev,
+ struct chain_rule,
+ head)->r.pos;
+ }
+ }
+ }
+ return HE_OK;
+}
+
+
+
+#ifdef DEBUG
+hipac_error
+hipac_get_dt_rule_ptrs(const char *name, const __u32 pos,
+ void **res)
+{
+ hipac_error error;
+ struct hipac_chain *chain;
+ struct chain_rule *rule;
+
+ if (unlikely(!name || !res))
+ ARG_ERR;
+
+ if ((error = chain_hash_lookup(name, &chain)))
+ return error;
+
+ if (list_empty(&chain->head)){
+ *res = chain->end;
+ return HE_OK;
+ }
+ rule = list_entry(chain->head.prev, struct chain_rule, head);
+ if (pos > rule->r.pos){
+ if (pos == rule->r.pos + 1){
+ *res = chain->end;
+ return HE_OK;
+ } else {
+ return HE_RULE_NOT_EXISTENT;
+ }
+ }
+
+ if (unlikely(error = chain_find_rule_with_pos(chain, pos, &rule)))
+ return error;
+
+ *res = rule->dtr;
+ return HE_OK;
+}
+
+
+
+__u8
+dt_rules_have_same_position(void *hipac, void *dt_start, void *dt_rule)
+{
+ struct dt_rule *rule = (struct dt_rule *) dt_start;
+
+ if (!hipac || !dt_start || !dt_rule){
+ ARG_MSG;
+ return 0;
+ }
+ if (rule->head.prev != &((struct dimtree *) hipac)->chain->head) {
+ if (rule->spec.pos ==
+ list_entry(rule->head.prev, struct dt_rule, head)
+ ->spec.pos){
+ ERR("previous rule with same position found");
+ return 0;
+ }
+ }
+ while (rule->spec.pos == ((struct dt_rule *) dt_rule)->spec.pos) {
+ if (rule == dt_rule)
+ return 1;
+ if (rule->head.next ==
+ &((struct dimtree *) hipac)->chain->head)
+ return 0;
+ rule = list_entry(rule->head.next, struct dt_rule, head);
+ }
+ return 0;
+}
+
+
+#endif
+
+
+
+/* End of hipac_* functions */
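
The listing API above (hipac_get_chain_infos / hipac_get_next_rule / hipac_free_chain_infos) expects a fixed calling protocol: fetch the chain infos once, walk every chain until HE_RULE_NOT_EXISTENT is returned so the internal iterator state is reset, then free the info array. A minimal caller sketch, assuming only the declarations from hipac.h; the function name and the reduced error handling are mine, not part of the patch:

#include "hipac.h"

/* sketch only: walk all chains and all rules, as the comments above demand */
static hipac_error list_all_chains(void)
{
	struct hipac_chain_info *inf;
	struct hipac_rule *prev, *next;
	hipac_error err;
	__u32 len, i;

	if ((err = hipac_get_chain_infos(NULL, &inf, &len)))
		return err;
	for (i = 0; i < len; i++) {
		prev = NULL;
		/* must loop until HE_RULE_NOT_EXISTENT is returned,
		   see the attention note on hipac_get_next_rule() */
		while (!(err = hipac_get_next_rule(&inf[i], prev, &next)))
			prev = next;
		if (err != HE_RULE_NOT_EXISTENT) {
			hipac_free_chain_infos(inf);
			return err;
		}
	}
	return hipac_free_chain_infos(inf);
}
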
diff -urN nf-hipac/kernel/hipac.h nfhipac/kernel/hipac.h
--- nf-hipac/kernel/hipac.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/hipac.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,623 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _HIPAC_H
+#define _HIPAC_H
+
+#include "mode.h"
+
+/* values of bittype in specification header */
+#define BIT_U16 0
+#define BIT_U32 1
+
+/* maximum length of a hipac chain name (including terminating '\0') */
+#define HIPAC_CHAIN_NAME_MAX_LEN 32
+
+/* representation of the match [left, right] associated with a dimension id;
+ [left, right] must not be a wildcard match */
+struct hipac_match
+{
+ unsigned dimid : 5;
+ unsigned invert : 1;
+ __u32 left;
+ __u32 right;
+ char next_match[0];
+};
+
+struct hipac_rule
+{
+ __u32 pos;
+ char cmp_start[0];
+ __u32 size;
+ __u32 origin;
+ __u8 action;
+ __u8 native_mct;
+ __u16 match_offset;
+ __u32 target_offset;
+ struct hipac_match first_match[0];
+};
+
+struct hipac_chain_info
+{
+ char *label;
+ __u8 policy;
+ __u8 is_internal_chain;
+ __u32 rule_num;
+ struct list_head *chain_head;
+};
+
+
+
+/* return values of function based match executor */
+typedef enum
+{
+ MATCH_YES,
+ MATCH_NO,
+ MATCH_HOTDROP
+} hipac_match_t;
+
+
+/* hipac_rule action value; TARGET_DUMMY is reserved for internal usage only;
+   the function based target executor may return TARGET_ACCEPT, TARGET_DROP
+ or TARGET_NONE */
+typedef enum
+{
+ TARGET_DROP = NF_DROP,
+ TARGET_ACCEPT = NF_ACCEPT,
+ TARGET_NONE = (NF_ACCEPT > NF_DROP ? NF_ACCEPT + 1 : NF_DROP + 1),
+ TARGET_RETURN,
+ TARGET_DUMMY,
+ TARGET_EXEC,
+ TARGET_CHAIN
+} hipac_target_t;
+
+
+/* function based match and target executor function types */
+typedef hipac_match_t (* hipac_match_exec_t) (const void *packet,
+ void *first_match, void *end);
+typedef hipac_target_t (* hipac_target_exec_t) (const void *packet,
+ void *target);
+
+
+/* dimension extractor function type */
+typedef __u32 (* hipac_extract_t) (const void *packet, int *hotdrop);
+
+
+/* equality function type */
+typedef int (* hipac_eq_exec_t) (const struct hipac_rule *r1,
+ const struct hipac_rule *r2);
+
+
+/* constructor/destructor function type */
+typedef void (* hipac_copy_constructor_t) (const struct hipac_rule *r_org,
+ struct hipac_rule *r_new);
+typedef void (* hipac_destroy_exec_t) (struct hipac_rule *r);
+
+
+/* hipac error codes */
+typedef enum
+{
+ HE_OK = 0,
+ HE_IMPOSSIBLE_CONDITION = -1,
+ HE_LOW_MEMORY = -2,
+ HE_CHAIN_EXISTS = -3,
+ HE_CHAIN_NOT_EXISTENT = -4,
+ HE_CHAIN_IS_EMPTY = -5,
+ HE_CHAIN_NOT_EMPTY = -6,
+ HE_CHAIN_IS_USERDEFINED = -7,
+ HE_CHAIN_IS_CONNECTED = -8,
+ HE_CHAIN_IS_REFERENCED = -9,
+ HE_CHAIN_NOT_NATIVE = -10,
+ HE_CHAIN_IS_NATIVE = -11,
+ HE_RULE_NOT_EXISTENT = -12,
+ HE_RULE_ORIGIN_MISMATCH = -13,
+ HE_RULE_PREFIX_MISMATCH = -14,
+ HE_LOOP_DETECTED = -15,
+ HE_REC_LIMIT = -16,
+ HE_TARGET_CHAIN_NOT_EXISTENT = -17,
+ HE_TARGET_CHAIN_IS_NATIVE = -18,
+ HE_NATIVE_CHAIN_EXISTS = -19,
+ HE_NEXT_ERROR = -100 // shouldn't be changed
+} hipac_error;
+
+
+
+/* return maximum key of a dimension with the given bittype */
+static inline __u32
+hipac_maxkey(__u8 bittype)
+{
+ if (bittype == BIT_U16)
+ return 0xffff;
+ return 0xffffffff;
+}
+
+
+/* init hipac data structures;
+ MUST be called once at the beginning in order to let the other
+ operations work properly!
+ dimid_to_bittype: assigns dimids to bit types.
+ i-th element of the array contains the bit type
+ of dimension id i
+ extract: functions to extract certain fields from a packet.
+ the function at position i of the array returns
+ the entry in a packet that corresponds to
+                     dimension id i (e.g. the source ip of the packet)
+ len: length of the dim2btype and extract array
+ copycon: constructor function
+ destroy: destructor function
+ match: match executor function
+ target: target executor function
+ eq: equality function to compare rules
+ maxmem: maximum allowed memory consumption
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_init(const __u8 dimid_to_bittype[], const hipac_extract_t extract[],
+ const __u8 len, hipac_copy_constructor_t copycon,
+ hipac_destroy_exec_t destroy, hipac_match_exec_t match,
+ hipac_target_exec_t target, hipac_eq_exec_t eq,
+ const __u64 maxmem);
+
+
+/* free all hipac data structures;
+ MUST be called once in the end
+ attention: make sure there are no external accesses to hipac
+ data structures taking place anymore! */
+void
+hipac_exit(void);
+
+
+/* return new hipac data structure
+ name: name of the public chain
+ name_intern: name of the internal dimtree chain
+ policy: initial policy
+   origin:       bitvector unique to this data structure
+ hipac: pointer to a pointer to the resulting hipac data
+ structure. use as first argument to hipac_match()
+ possible errors: HE_LOW_MEMORY, HE_NATIVE_CHAIN_EXISTS,
+ HE_CHAIN_EXISTS, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_new(const char *name, const char* name_intern, const __u8 policy,
+ const __u32 origin, void **hipac);
+
+
+/* set maximum amount of memory the hipac data structures are
+   allowed to occupy. return HE_LOW_MEMORY if 'mem' is lower than
+ currently allocated memory
+ possible errors: HE_LOW_MEMORY */
+hipac_error
+hipac_set_maxmem(const __u64 mem);
+
+
+/* get maximum amount of memory the hipac data structures are
+ allowed to occupy. */
+__u64
+hipac_get_maxmem(void);
+
+
+/* set policy of chain with name 'name' to 'policy'.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_USERDEFINED,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_set_policy(const char *name, const __u8 policy);
+
+
+/* get policy of chain with name 'name' and write it to 'result'.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_USERDEFINED,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_policy(const char *name, __u8 *result);
+
+
+/* create new user-defined chain with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_new_chain(const char* name);
+
+
+/* delete all rules in chain with name 'name'.
+ if 'name' is NULL all rules in all chains are deleted
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_flush_chain(const char *name);
+
+
+/* delete user-defined chain with name 'name'.
+ if 'name' is NULL delete all chains that are empty
+ and not referenced from other chains.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_NATIVE,
+ HE_CHAIN_NOT_EMPTY, HE_CHAIN_IS_REFERENCED */
+hipac_error
+hipac_delete_chain(const char *name);
+
+
+/* rename chain with name 'name' to 'new_name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_rename_chain(const char *name, const char *new_name);
+
+
+/* get an array of hipac_chain_info structs containing required infos
+ for a rule listing of chain with name 'name'. if 'name' is NULL
+ return infos for all chains. 'len' specifies the length of the
+ returned struct hipac_chain_info array.
+ attention: don't forget to free the struct hipac_chain_info array
+ after the rule listing via hipac_free_chain_infos()!
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_chain_infos(const char *name, struct hipac_chain_info **inf,
+ __u32 *len);
+
+
+/* free array of hipac_chain_info structs that has been allocated
+ before via hipac_get_chain_infos().
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_free_chain_infos(struct hipac_chain_info *inf);
+
+
+/* get next hipac_rule 'next' of previous hipac_rule 'prev'.
+ with this function you can walk over the chain during rule listing.
+ to get the first hipac_rule of a chain, set 'prev' to NULL.
+ when the end of the chain is reached or the chain is empty the
+ hipac_error HE_RULE_NOT_EXISTENT is returned.
+ attention: during rule listing of a chain hipac_get_next_rule()
+ must always be called until finally HE_RULE_NOT_EXISTENT
+ is returned!
+ possible errors: HE_LOW_MEMORY, HE_RULE_NOT_EXISTENT,
+                    HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_next_rule(const struct hipac_chain_info *inf,
+ struct hipac_rule *prev,
+ struct hipac_rule **next);
+
+
+/* append hipac_rule 'rule' to chain with name 'name'.
+ 'rule->pos' is set to the position of the last rule
+ in the chain + 1.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_append(const char *name, const struct hipac_rule *rule);
+
+
+/* insert hipac_rule 'rule' at position 'rule->pos' into chain
+ with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_insert(const char *name, const struct hipac_rule *rule);
+
+
+/* delete hipac_rule with position 'pos' from chain with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_delete_pos(const char *name, const __u32 pos);
+
+
+/* find the first rule in chain with name 'name' that equals
+ hipac_rule 'rule' and delete it.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_delete(const char *name, const struct hipac_rule *rule);
+
+
+/* replace rule with position 'rule->pos' in chain with name 'name'
+ with hipac_rule 'rule'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_LOOP_DETECTED,
+ HE_REC_LIMIT, HE_RULE_ORIGIN_MISMATCH,
+ HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_replace(const char *name, const struct hipac_rule *rule);
+
+
+/* match packet and return the terminal packet action which is either
+ TARGET_ACCEPT or TARGET_DROP; note that this is the only function
+ that may be used in parallel with other functions of the hipac API */
+hipac_target_t
+hipac_match(void *hipac, const void *packet);
+
+
+
+/*
+ * hipac statistics: data structures
+ */
+
+/* rlp statistics
+ total_mem_tight: current overall memory consumption in bytes
+ in terms of how much has been requested
+ total_mem_real: current overall memory consumption in bytes
+ in terms of how much has actually been
+ allocated
+ rlp_mem_tight: current memory consumption in bytes of all
+ rlps (not including termrule blocks) in
+ terms of how much has been requested
+ rlp_mem_real: current memory consumption in bytes of all
+ rlps (not including termrule blocks) in
+ terms of how much has actually been
+ allocated
+ termrule_mem_tight: current memory consumption in bytes of all
+ termrule blocks in terms of how much has
+ been requested
+ termrule_mem_real: current memory consumption in bytes of all
+ termrule blocks in terms of how much has
+ actually been allocated
+ rlp_num: number of rlps
+ rlp_dimid_num: mapping with [i] containing the number of
+ rlps in dimension i
+ rlp_depth_num: mapping with [i] containing the number of
+ rlps in depth i
+ termrule_num: number of termrule blocks
+ termrule_ptr_num: number of entries in all termrule blocks
+ keys_num: number of keys in all rlps
+ rlp_dimid_keys_stat: array of distributions with [i][j]
+ containing the number of rlps in
+                            dimension i with 2^(j - 1) <= keys < 2^j
+ termptr_num: number of terminal pointers (of all rlps)
+ termptr_dimid_num: mapping with [i] containing the number of
+ terminal pointers in dimension i
+ termptr_depth_num: mapping with [i] containing the number of
+ terminal pointers in depth i
+ nontermptr_num: number of non-terminal pointers (of all
+ rlps)
+ nontermptr_dimid_num: mapping with [i] containing the number of
+ non-terminal pointers in dimension i
+ nontermptr_depth_num: mapping with [i] containing the number of
+ non-terminal pointers in depth i
+ dt_elem_num: number of elementary interval structures
+ dt_elem_ptr_num: number of rules in all elementary interval
+ structures
+ dt_elem_stat: distribution with [i] containing the number
+ of elementary interval structures with
+ 2^(i - 1) <= rules < 2^i */
+struct hipac_rlp_stat
+{
+ __u64 total_mem_tight;
+ __u64 total_mem_real;
+ __u64 rlp_mem_tight;
+ __u64 rlp_mem_real;
+ __u64 termrule_mem_tight;
+ __u64 termrule_mem_real;
+ __u32 rlp_num;
+ __u32 rlp_dimid_num[16];
+ __u32 rlp_depth_num[16];
+ __u32 termrule_num;
+ __u32 termrule_ptr_num;
+ __u32 keys_num;
+ __u32 rlp_dimid_keys_stat[16][18];
+ __u32 termptr_num;
+ __u32 termptr_dimid_num[16];
+ __u32 termptr_depth_num[16];
+ __u32 nontermptr_num;
+ __u32 nontermptr_dimid_num[16];
+ __u32 nontermptr_depth_num[16];
+ __u32 dt_elem_num;
+ __u32 dt_elem_ptr_num;
+ __u32 dt_elem_stat[16];
+};
+
+/* dimtree statistics
+ chain_mem_tight: current memory consumption in bytes of
+ a dimtree chain including the rules in
+ terms of how much has been requested
+ chain_mem_real: current memory consumption in bytes of
+ a dimtree chain including the rules in
+ terms of how much has actually been
+ allocated
+ rule_num: number of dimtree rules
+ rules_with_exec_matches: number of dimtree rules containing at
+ least one function based match
+ rules_with_exec_target: number of dimtree rules containing
+ a function based target
+ rules_same_pos_stat: distribution with [i] containing number
+ of dimtree rule series of length
+ >= 2^(i - 1) and < 2^i where all rules
+ share the same position
+ dt_match_stat: mapping with [i] containing the number
+ of dimtree rules having i non-wildcard
+ matches */
+struct hipac_dimtree_stat
+{
+ __u64 chain_mem_tight;
+ __u64 chain_mem_real;
+ __u32 rule_num;
+ __u32 rules_with_exec_matches;
+ __u32 rules_with_exec_target;
+ __u32 rules_same_pos_stat[16];
+ __u32 dt_match_stat[16];
+};
+
+/* hipac memory statistics
+ total_mem_tight: current overall memory consumption in
+ bytes in terms of how much has been
+ requested
+ total_mem_real: current overall memory consumption in
+ bytes in terms of how much has
+ actually been allocated
+ memhash_elem_num: number of objects for which memory
+ has been requested
+ memhash_len: number of buckets in the memory hash
+ memhash_smallest_bucket_len: number of objects in the smallest
+ bucket of the memory hash
+ memhash_biggest_bucket_len: number of objects in the biggest
+ bucket of the memory hash
+ memhash_bucket_stat: distribution with [i] containing the
+ number of buckets with
+ 2^(i - 1) <= objects < 2^i */
+struct hipac_mem_stat
+{
+ __u64 total_mem_tight;
+ __u64 total_mem_real;
+ __u32 memhash_elem_num;
+ __u32 memhash_len;
+ __u32 memhash_smallest_bucket_len;
+ __u32 memhash_biggest_bucket_len;
+ __u32 memhash_bucket_stat[16];
+
+};
+
+
+/* hipac chain statistics
+ mem_tight: current memory consumption in bytes of all
+ hipac chains including the rules in terms of
+ how much has been requested
+ mem_real: current memory consumption in bytes of all
+ hipac chains including the rules in terms of
+ how much has actually been allocated
+ chain_num: number of chains
+ rule_num: number of rules in all chains
+   prefix_stat:     distribution with [i] containing the number of
+ chains with 2^(i - 1) <= paths < 2^i
+ incoming_stat: distribution with [i] containing the number of
+ chains with 2^(i - 1) <= incoming edges < 2^i
+ outgoing_stat: distribution with [i] containing the number of
+ chains with 2^(i - 1) <= outgoing edges < 2^i */
+struct hipac_chain_stat
+{
+ __u64 mem_tight;
+ __u64 mem_real;
+ __u32 chain_num;
+ __u32 rule_num;
+ __u32 prefix_stat[16];
+ __u32 incoming_stat[16];
+ __u32 outgoing_stat[16];
+};
+
+
+/* hipac rule statistics
+ rule_num: number of rules
+ exec_match_num: number of rules with exec_matches
+ exec_target_num: number of rules with exec_target
+ jump_target_num: number of rules with jump target
+ return_target_num: number of rules with return target
+ hipac_match_stat: mapping with [i] containing the number
+ of rules with i hipac_matches
+ inv_rules_stat: mapping with [i] containing the number
+ of rules with i inversion flags */
+struct hipac_rule_stat
+{
+ __u32 rule_num;
+ __u32 exec_match_num;
+ __u32 exec_target_num;
+ __u32 jump_target_num;
+ __u32 return_target_num;
+ __u32 hipac_match_stat[16];
+ __u32 inv_rules_stat[16];
+};
+
+
+/* hipac user statistics
+ total_mem_tight: current memory consumption in bytes in terms
+ of how much has been requested
+ total_mem_real: current memory consumption in bytes in terms
+ of how much has actually been allocated
+ chain_num: number of chains
+ rule_num: number of rules in all chains */
+struct hipac_user_stat
+{
+ __u64 total_mem_tight;
+ __u64 total_mem_real;
+ __u32 chain_num;
+ __u32 rule_num;
+};
+
+
+
+/*
+ * hipac statistics: functions
+ */
+
+/* get rlp statistics, i.e. the statistics of the internal
+ rlp representation of all rules reachable from the root chain
+ represented by the 'hipac' pointer
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_rlp_stat(void *hipac, struct hipac_rlp_stat *stat);
+
+
+/* get dimtree statistics, i.e. the statistics of the internal
+ chain representation of all rules reachable from the root chain
+ represented by the 'hipac' pointer
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_dimtree_stat(void *hipac, struct hipac_dimtree_stat *stat);
+
+
+/* get hipac memory statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_mem_stat(struct hipac_mem_stat *stat);
+
+
+/* get hipac chain statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_chain_stat(struct hipac_chain_stat *stat);
+
+
+/* get hipac rule statistics
+   returned statistics contain all rules of those chains that are
+ reachable from the root chain represented by the 'hipac' pointer
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_rule_stat(void *hipac, struct hipac_rule_stat *stat);
+
+
+/* get hipac user statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_user_stat(struct hipac_user_stat *stat);
+
+#ifdef DEBUG
+/* per object debugging: selection is done by an externally defined variable
+ hipac_debug which is a bit vector of DEBUG_* */
+# define DEBUG_HIPAC 0x01
+# define DEBUG_DIMTREE 0x02
+# define DEBUG_RLP 0x04
+# define DEBUG_IHASH 0x08
+# define DEBUG_GLOBAL 0x10
+ extern unsigned hipac_debug;
+
+hipac_error
+hipac_get_dt_rule_ptrs(const char *name, const __u32 pos, void **res);
+
+__u8
+dt_rules_have_same_position(void *hipac, void *dt_start, void *dt_rule);
+#endif
+
+#endif
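
To make the call order prescribed by this header concrete, here is a rough initialization sketch. The dimension layout, the dummy extractor, the chain names and the NULL executors are placeholders invented for illustration (the real callbacks live in nfhp_mod.c further down in the patch); the snippet only shows which parameter goes where:

#include "hipac.h"

/* placeholder extractor: a real one returns e.g. the source ip of the packet */
static __u32 extract_dummy(const void *packet, int *hotdrop)
{
	(void) packet;
	(void) hotdrop;
	return 0;
}

static const __u8 dim2bit[] = { BIT_U32, BIT_U16 };
static const hipac_extract_t extr[] = { extract_dummy, extract_dummy };

static hipac_error setup(void **hipac)
{
	hipac_error err;

	/* hipac_init() exactly once, before anything else */
	err = hipac_init(dim2bit, extr, 2, NULL, NULL, NULL, NULL, NULL,
			 4 << 20 /* maxmem: 4 MB */);
	if (err)
		return err;

	/* one public chain backed by one internal dimtree chain */
	return hipac_new("INPUT", "INPUT_intern", TARGET_ACCEPT,
			 0x1 /* origin bit */, hipac);
}
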
diff -urN nf-hipac/kernel/ihash.c nfhipac/kernel/ihash.c
--- nf-hipac/kernel/ihash.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/ihash.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,463 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include "global.h"
+#include "ihash.h"
+
+#define MAX_BUCKETS_MINI_ALLOC (MINI_ALLOC_MAX / sizeof(void *))
+#define INC_POSSIBLE(ihash, len) (!(ihash)->use_mini_alloc || \
+ (len) <= MAX_BUCKETS_MINI_ALLOC)
+#define BUCKET_SIZE(len) (sizeof(struct ihash_bucket) + (len) * \
+ sizeof(struct ihash_keyval))
+#define LEN(array) (sizeof(array) / sizeof(*(array)))
+
+
+int
+eq_val(const void *key1, const void *key2)
+{
+ return key1 == key2;
+}
+
+__u32
+ihash_func_val(const void *key)
+{
+#ifdef BIT32_ARCH
+ /* 32 bit mix function */
+ __u32 h = (__u32) key;
+
+ h += ~(h << 15);
+ h ^= (h >> 10);
+ h += (h << 3);
+ h ^= (h >> 6);
+ h += ~(h << 11);
+ h ^= (h >> 16);
+#else
+ /* 64 bit mix function */
+ __u64 h = (__u64) key;
+
+ h += ~(h << 32);
+ h ^= (h >> 22);
+ h += ~(h << 13);
+ h ^= (h >> 8);
+ h += (h << 3);
+ h ^= (h >> 15);
+ h += ~(h << 27);
+ h ^= (h >> 31);
+#endif
+ return h;
+}
+
+int
+eq_str(const void *key1, const void *key2)
+{
+ return !strcmp(key1, key2);
+}
+
+__u32
+ihash_func_str(const void *key)
+{
+ __u32 high, h = 0;
+ const char *c = key;
+
+ if (unlikely(key == NULL)) {
+ ERR("key is NULL");
+ return 0;
+ }
+ for (; *c != '\0'; c++) {
+ /* CRC variant */
+ high = h & 0xf8000000;
+ h <<= 5;
+ h ^= high >> 27;
+ h ^= *c;
+ }
+ return h;
+}
+
+static inline __u32
+near_pow2(__u32 n)
+{
+ if (n == 0 || n > 0x80000000) {
+ return 1;
+ }
+ n--;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ return ++n;
+}
+
+struct ihash *
+ihash_new(__u32 len, int use_mini_alloc, __u32 avrg_elem_per_bucket,
+ ihash_func_t hash_fn, eq_t eq_fn)
+{
+ struct ihash *h;
+ struct ihash_bucket **b;
+ __u32 i;
+
+ if (unlikely(hash_fn == NULL || eq_fn == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ h = mini_alloc(sizeof(*h));
+ if (h == NULL) {
+ return NULL;
+ }
+ if (use_mini_alloc && len > MAX_BUCKETS_MINI_ALLOC) {
+ len = MAX_BUCKETS_MINI_ALLOC;
+ } else {
+ len = near_pow2(len);
+ }
+ b = use_mini_alloc ? mini_alloc(len * sizeof(*b)) :
+ hp_alloc(len * sizeof(*b), 1);
+ if (b == NULL) {
+ mini_free(h);
+ return NULL;
+ }
+ h->hash_fn = hash_fn;
+ h->eq_fn = eq_fn;
+ h->use_mini_alloc = use_mini_alloc;
+ h->avrg_elem_per_bucket = avrg_elem_per_bucket;
+ h->len = len;
+ h->elem_ct = 0;
+ h->bucket = b;
+ /* strictly speaking memset(b, 0, len * sizeof(*b)) would be wrong
+ here because there are architectures where the machine
+ representation of the NULL pointer is not 0x0 */
+ for (i = 0; i < len; i++) {
+ b[i] = NULL;
+ }
+ return h;
+}
+
+void
+ihash_free(struct ihash *h)
+{
+ __u32 i;
+
+ if (unlikely(h == NULL)) {
+ ARG_MSG;
+ return;
+ }
+ for (i = 0; i < h->len; i++) {
+ if (h->bucket[i] != NULL) {
+ mini_free(h->bucket[i]);
+ }
+ }
+ if (h->use_mini_alloc) {
+ mini_free(h->bucket);
+ } else {
+ hp_free(h->bucket);
+ }
+ mini_free(h);
+}
+
+/* return values: 0 : ok
+ -1 : low memory
+ -2 : bucket cannot be enlarged further */
+static inline int
+insert(struct ihash *h, void *key, void *val)
+{
+ struct ihash_bucket *b;
+ __u32 i;
+
+ i = HASH(h->hash_fn, key, h->len);
+ if (h->bucket[i] == NULL) {
+ /* first element in bucket */
+ b = mini_alloc(BUCKET_SIZE(1));
+ if (b == NULL) {
+ return -1;
+ }
+ b->len = 1;
+ b->kv[0].key = key;
+ b->kv[0].val = val;
+ h->elem_ct++;
+ h->bucket[i] = b;
+ return 0;
+ }
+ if (unlikely(BUCKET_SIZE(h->bucket[i]->len + 1) > MINI_ALLOC_MAX)) {
+ /* bucket cannot be enlarged further */
+ return -2;
+ }
+ if (unlikely(mini_alloc_size(BUCKET_SIZE(h->bucket[i]->len)) !=
+ mini_alloc_size(BUCKET_SIZE(h->bucket[i]->len + 1)))) {
+ /* bucket must be enlarged */
+ b = mini_alloc(BUCKET_SIZE(h->bucket[i]->len + 1));
+ if (b == NULL) {
+ return -1;
+ }
+ b->len = h->bucket[i]->len + 1;
+ b->kv[0].key = key;
+ b->kv[0].val = val;
+ memcpy(&b->kv[1], &h->bucket[i]->kv[0],
+ h->bucket[i]->len * sizeof(*b->kv));
+ h->elem_ct++;
+ mini_free(h->bucket[i]);
+ h->bucket[i] = b;
+ return 0;
+ }
+
+ h->bucket[i]->kv[h->bucket[i]->len].key = key;
+ h->bucket[i]->kv[h->bucket[i]->len].val = val;
+ h->bucket[i]->len++;
+ h->elem_ct++;
+ return 0;
+}
+
+/* return values like insert */
+static inline int
+rehash(struct ihash *h_old, struct ihash *h_new)
+{
+ __u32 i, j;
+ int stat;
+
+ for (i = 0; i < h_old->len; i++) {
+ if (h_old->bucket[i] == NULL) {
+ continue;
+ }
+ for (j = 0; j < h_old->bucket[i]->len; j++) {
+ stat = insert(
+ h_new, h_old->bucket[i]->kv[j].key,
+ h_old->bucket[i]->kv[j].val);
+ if (stat < 0) {
+ return stat;
+ }
+ }
+ }
+ return 0;
+}
+
+hipac_error
+ihash_insert(struct ihash **h, void *key, void *val)
+{
+ int shift = 1;
+ int do_inc = 0;
+ int stat;
+ __u32 len;
+
+ if (unlikely(h == NULL || *h == NULL || key == NULL)) {
+ ARG_ERR;
+ }
+ len = (*h)->len;
+ while (1) {
+ if (unlikely((do_inc || (*h)->elem_ct >=
+ len * (*h)->avrg_elem_per_bucket) &&
+ INC_POSSIBLE(*h, len << shift))) {
+ /* increase hash table */
+ struct ihash *new;
+
+ new = ihash_new(len << shift, (*h)->use_mini_alloc,
+ (*h)->avrg_elem_per_bucket,
+ (*h)->hash_fn, (*h)->eq_fn);
+ if (new == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ stat = rehash(*h, new);
+ if (stat < 0) {
+ ihash_free(new);
+ if (stat == -2 &&
+ INC_POSSIBLE(*h, len << ++shift)) {
+ WARN_("ihash bucket full after rehash "
+ "-> try again with more buckets");
+ continue;
+ }
+ return HE_LOW_MEMORY;
+ }
+ ihash_free(*h);
+ *h = new;
+ do_inc = 0;
+ }
+ stat = insert(*h, key, val);
+ if (stat < 0) {
+ if (stat == -2 &&
+ (((*h)->elem_ct <
+ len * (*h)->avrg_elem_per_bucket &&
+ INC_POSSIBLE(*h, len << shift)) ||
+ INC_POSSIBLE(*h, len << ++shift))) {
+ WARN_("ihash bucket full after rehash -> try "
+ "again with more buckets");
+ do_inc = 1;
+ continue;
+ }
+ return HE_LOW_MEMORY;
+ }
+ return HE_OK;
+ }
+}
+
+static inline void
+delete(struct ihash *h, int i, int j, void **val)
+{
+ struct ihash_bucket *b;
+
+ if (unlikely(mini_alloc_size(BUCKET_SIZE(h->bucket[i]->len)) !=
+ mini_alloc_size(BUCKET_SIZE(h->bucket[i]->len - 1)))) {
+ /* shrink bucket */
+ b = mini_alloc(BUCKET_SIZE(h->bucket[i]->len - 1));
+ if (b != NULL) {
+ b->len = h->bucket[i]->len - 1;
+ if (j > 0) {
+ memcpy(b->kv, h->bucket[i]->kv,
+ j * sizeof(*b->kv));
+ }
+ if (h->bucket[i]->len > j + 1) {
+ memcpy(&b->kv[j], &h->bucket[i]->kv[j+1],
+ (h->bucket[i]->len - j - 1) *
+ sizeof(*b->kv));
+ }
+ if (val != NULL) {
+ *val = h->bucket[i]->kv[j].val;
+ }
+ mini_free(h->bucket[i]);
+ h->bucket[i] = b;
+ h->elem_ct--;
+ return;
+ } else {
+ WARN_("unable to shrink ihash bucket");
+ }
+ }
+
+ if (val != NULL) {
+ *val = h->bucket[i]->kv[j].val;
+ }
+ if (h->bucket[i]->len > j + 1) {
+ memmove(&h->bucket[i]->kv[j], &h->bucket[i]->kv[j + 1],
+ (h->bucket[i]->len - j - 1) * sizeof(*b->kv));
+ }
+ h->bucket[i]->len--;
+ h->elem_ct--;
+}
+
+hipac_error
+ihash_delete(struct ihash *h, const void *key, void **val)
+{
+ int i, j;
+
+ if (unlikely(h == NULL || key == NULL)) {
+ ARG_ERR;
+ }
+ i = HASH(h->hash_fn, key, h->len);
+ if (unlikely(h->bucket[i] == NULL)) {
+ goto not_contained;
+ }
+ for (j = h->bucket[i]->len - 1; j >= 0; j--) {
+ if (h->eq_fn(h->bucket[i]->kv[j].key, key)) {
+ delete(h, i, j, val);
+ return HE_OK;
+ }
+ }
+
+ not_contained:
+ IMPOSSIBLE_CONDITION("key not contained in ihash");
+}
+
+hipac_error
+ihash_replace(struct ihash **h, const void *oldkey, void **oldval,
+ void *newkey, void *newval)
+{
+ int i, j, stat;
+
+ if (unlikely(h == NULL || *h == NULL || oldkey == NULL ||
+ newkey == NULL)) {
+ ARG_ERR;
+ }
+ i = HASH((*h)->hash_fn, oldkey, (*h)->len);
+ if (unlikely((*h)->bucket[i] == NULL)) {
+ goto not_contained;
+ }
+ if (i != HASH((*h)->hash_fn, newkey, (*h)->len)) {
+ stat = ihash_insert(h, newkey, newval);
+ if (unlikely(stat < 0)) {
+ if (stat != HE_LOW_MEMORY) {
+ IMPOSSIBLE_CONDITION("ihash insert failed for"
+ " another reason than "
+ "low memory");
+ }
+ return stat;
+ }
+		/* a rehash might have occurred so i must be recomputed */
+ i = HASH((*h)->hash_fn, oldkey, (*h)->len);
+ for (j = (*h)->bucket[i]->len - 1; j >= 0; j--) {
+ if ((*h)->eq_fn((*h)->bucket[i]->kv[j].key, oldkey)) {
+ delete(*h, i, j, oldval);
+ return HE_OK;
+ }
+ }
+ /* oldkey is not contained in h */
+ i = HASH((*h)->hash_fn, newkey, (*h)->len);
+ for (j = (*h)->bucket[i]->len - 1; j >= 0; j--) {
+ if ((*h)->eq_fn((*h)->bucket[i]->kv[j].key, newkey)) {
+ delete(*h, i, j, NULL);
+ goto not_contained;
+ }
+ }
+ IMPOSSIBLE_CONDITION("newkey not contained in ihash although "
+ "it has been inserted");
+ }
+ for (j = (*h)->bucket[i]->len - 1; j >= 0; j--) {
+ if ((*h)->eq_fn((*h)->bucket[i]->kv[j].key, oldkey)) {
+ if (oldval != NULL) {
+ *oldval = (*h)->bucket[i]->kv[j].val;
+ }
+ (*h)->bucket[i]->kv[j].key = newkey;
+ (*h)->bucket[i]->kv[j].val = newval;
+ return HE_OK;
+ }
+ }
+
+ not_contained:
+ IMPOSSIBLE_CONDITION("oldkey not contained in ihash");
+}
+
+hipac_error
+ihash_stat(struct ihash *h, struct ihash_stat *stat)
+{
+ __u32 i;
+
+ if (unlikely(h == NULL || stat == NULL)) {
+ ARG_ERR;
+ }
+
+ stat->elem_ct = h->elem_ct;
+ stat->bucket_len = h->len;
+ stat->small_bucket_len = 0xffffffff;
+ stat->big_bucket_len = 0;
+ stat_distribution_init(stat->bucket_dist, LEN(stat->bucket_dist));
+
+ for (i = 0; i < h->len; i++) {
+ if (h->bucket[i] == NULL) {
+ stat->small_bucket_len = 0;
+ stat_distribution_add(stat->bucket_dist,
+ LEN(stat->bucket_dist), 0);
+ continue;
+ }
+ if (h->bucket[i]->len < stat->small_bucket_len) {
+ stat->small_bucket_len = h->bucket[i]->len;
+ }
+ if (h->bucket[i]->len > stat->big_bucket_len) {
+ stat->big_bucket_len = h->bucket[i]->len;
+ }
+ stat_distribution_add(stat->bucket_dist,
+ LEN(stat->bucket_dist),
+ h->bucket[i]->len);
+ }
+ return HE_OK;
+}
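
A small aside on ihash_new(): the bucket count is rounded up with the near_pow2() bit-smearing trick so that HASH() can mask instead of computing a modulo. A standalone copy of that rounding, included here only to show what it produces for a few inputs (not part of the patch):

#include <stdio.h>

/* round n up to the next power of two, as ihash_new() does internally */
static unsigned int near_pow2(unsigned int n)
{
	if (n == 0 || n > 0x80000000u)
		return 1;
	n--;
	n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
	n |= n >> 8;  n |= n >> 16;
	return ++n;
}

int main(void)
{
	/* prints: 1 8 64 1024 */
	printf("%u %u %u %u\n", near_pow2(1), near_pow2(5),
	       near_pow2(64), near_pow2(1000));
	return 0;
}
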
diff -urN nf-hipac/kernel/ihash.h nfhipac/kernel/ihash.h
--- nf-hipac/kernel/ihash.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/ihash.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,285 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _IHASH_H
+#define _IHASH_H
+
+#include "mode.h"
+#include "global.h" // hipac_error and error message macros
+
+#define HASH(fn, key, len) (fn(key) & ((len) - 1))
+
+typedef __u32 (*ihash_func_t) (const void *key);
+typedef int (*eq_t) (const void *key1, const void *key2);
+
+struct ihash_keyval
+{
+ void *key, *val;
+};
+
+struct ihash_bucket
+{
+ __u32 len;
+ struct ihash_keyval kv[0];
+};
+
+struct ihash
+{
+ ihash_func_t hash_fn;
+ eq_t eq_fn;
+ int use_mini_alloc;
+ __u32 len, elem_ct, avrg_elem_per_bucket;
+ struct ihash_bucket **bucket;
+};
+
+struct ihash_stat
+{
+ __u32 elem_ct;
+ __u32 bucket_len, small_bucket_len, big_bucket_len;
+ /* bucket_dist[i] (0 <= i <= 14) contains the number of buckets
+ with <= 2^i - 1 (and >= 2^(i-1) if i > 0) elements;
+ bucket_dist[15] contains the number of buckets with >= 2^14
+ elements */
+ __u32 bucket_dist[16];
+};
+
+
+/* equality and hash function for strings as keys */
+int
+eq_str(const void *key1, const void *key2);
+
+__u32
+ihash_func_str(const void *key);
+
+
+/* equality and hash function for values as keys */
+int
+eq_val(const void *key1, const void *key2);
+
+__u32
+ihash_func_val(const void *key);
+
+/* if the value of the (key, val) pair is not a pointer but a value, ptr_to_val
+   and val_to_ptr serve as conversion functions */
+static inline __u64
+ptr_to_val(const void *p)
+{
+#ifdef BIT32_ARCH
+ return (__u32) p;
+#else
+ return (__u64) p;
+#endif
+}
+
+static inline void *
+val_to_ptr(__u64 v)
+{
+#ifdef BIT32_ARCH
+ return (void *) (__u32) v;
+#else
+ return (void *) v;
+#endif
+}
+
+
+/* create new hash table with len' buckets whereby len' is the nearest power
+ of two >= len; if use_mini_alloc is not 0 then mini_alloc is used to
+   allocate the bucket pointer array, otherwise hp_alloc is used;
+ avrg_elem_per_bucket indicates how many elements per bucket are allowed
+ at maximum assuming that they are equally distributed; in the case
+ use_mini_alloc is not 0 and the bucket pointer array cannot be further
+ enlarged the average number of elements per bucket may be larger */
+struct ihash *
+ihash_new(__u32 len, int use_mini_alloc, __u32 avrg_elem_per_bucket,
+ ihash_func_t hash_fn, eq_t eq_fn);
+
+void
+ihash_free(struct ihash *h);
+
+/* key must not be contained in h (this is not checked);
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ihash_insert(struct ihash **h, void *key, void *val);
+
+/* delete key and the corresponding value v from h; v stored in *val if val
+ is not NULL; key must be contained in h;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ihash_delete(struct ihash *h, const void *key, void **val);
+
+/* replace oldkey and the corresponding value v by newkey and newval; v is
+ stored in *oldval if oldval is not NULL; oldkey must be contained in h;
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ihash_replace(struct ihash **h, const void *oldkey, void **oldval,
+ void *newkey, void *newval);
+
+/* compute statistical info about h;
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+ihash_stat(struct ihash *h, struct ihash_stat *stat);
+
+/* generic lookup function */
+static inline void *
+ihash_lookup(const struct ihash *h, const void *key)
+{
+ __u32 i;
+ struct ihash_keyval *kv, *end;
+
+ if (unlikely(h == NULL || key == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ i = HASH(h->hash_fn, key, h->len);
+ if (h->bucket[i] == NULL) {
+ return NULL;
+ }
+ end = h->bucket[i]->kv + h->bucket[i]->len;
+ for (kv = h->bucket[i]->kv; kv < end; kv++) {
+ if (h->eq_fn(kv->key, key)) {
+ return kv->val;
+ }
+ }
+ return NULL;
+}
+
+/* optimized lookup function if keys are values */
+static inline void *
+ihash_lookup_val(const struct ihash *h, const void *key)
+{
+ __u32 i;
+ struct ihash_keyval *kv, *end;
+
+ if (unlikely(h == NULL || key == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ i = HASH(ihash_func_val, key, h->len);
+ if (h->bucket[i] == NULL) {
+ return NULL;
+ }
+ end = h->bucket[i]->kv + h->bucket[i]->len;
+ for (kv = h->bucket[i]->kv; kv < end; kv++) {
+ if (kv->key == key) {
+ return kv->val;
+ }
+ }
+ return NULL;
+}
+
+/* optimized lookup function if keys are strings */
+static inline void *
+ihash_lookup_str(const struct ihash *h, const void *key)
+{
+ __u32 i;
+ struct ihash_keyval *kv, *end;
+
+ if (unlikely(h == NULL || key == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+ i = HASH(ihash_func_str, key, h->len);
+ if (i < 0 || i >= h->len || h->bucket[i] == NULL) {
+ return NULL;
+ }
+ end = h->bucket[i]->kv + h->bucket[i]->len;
+ for (kv = h->bucket[i]->kv; kv < end; kv++) {
+ if (!strcmp(kv->key, key)) {
+ return kv->val;
+ }
+ }
+ return NULL;
+}
+
+/* call fn(key) for all keys of h */
+#define IHASH_KEY_ITERATE(h, cast, fn, args...) \
+do { \
+ __u32 i, j; \
+ \
+ if (unlikely((h) == NULL)) { \
+ ARG_MSG; \
+ break; \
+ } \
+ for (i = 0; i < (h)->len; i++) { \
+ if ((h)->bucket[i] == NULL) { \
+ continue; \
+ } \
+ for (j = 0; j < (h)->bucket[i]->len; j++) { \
+ (fn)((cast) (h)->bucket[i]->kv[j].key , ## args); \
+ } \
+ } \
+} while (0)
+
+/* call fn(val) for all values of h */
+#define IHASH_VAL_ITERATE(h, cast, fn, args...) \
+do { \
+ __u32 i, j; \
+ \
+ if (unlikely((h) == NULL)) { \
+ ARG_MSG; \
+ break; \
+ } \
+ for (i = 0; i < (h)->len; i++) { \
+ if ((h)->bucket[i] == NULL) { \
+ continue; \
+ } \
+ for (j = 0; j < (h)->bucket[i]->len; j++) { \
+ (fn)((cast) (h)->bucket[i]->kv[j].val , ## args); \
+ } \
+ } \
+} while (0)
+
+/* use the following macros to iterate over all (key, val) pairs in hash:
+ IHASH_FOR_EACH(hash, key, val) {
+ // do something with key, val
+ IHASH_FOR_EACH_END;
+ }
+ IHASH_FOR_EACH_KEY and IHASH_FOR_EACH_VAL are used similarly */
+#define IHASH_FOR_EACH(h, hkey, hval) \
+{ \
+ __u32 _ihash_i, _ihash_j; \
+ for (_ihash_i = 0; _ihash_i < (h)->len; _ihash_i++) { \
+ if ((h)->bucket[_ihash_i] == NULL) { \
+ continue; \
+ } \
+ for (_ihash_j = 0; _ihash_j < (h)->bucket[_ihash_i]->len; \
+ _ihash_j++) { \
+ (hkey) = (h)->bucket[_ihash_i]->kv[_ihash_j].key; \
+ (hval) = (h)->bucket[_ihash_i]->kv[_ihash_j].val;
+
+#define IHASH_FOR_EACH_KEY(h, hkey) \
+{ \
+ __u32 _ihash_i, _ihash_j; \
+ for (_ihash_i = 0; _ihash_i < (h)->len; _ihash_i++) { \
+ if ((h)->bucket[_ihash_i] == NULL) { \
+ continue; \
+ } \
+ for (_ihash_j = 0; _ihash_j < (h)->bucket[_ihash_i]->len; \
+ _ihash_j++) { \
+ (hkey) = (h)->bucket[_ihash_i]->kv[_ihash_j].key;
+
+#define IHASH_FOR_EACH_VAL(h, hval) \
+{ \
+ __u32 _ihash_i, _ihash_j; \
+ for (_ihash_i = 0; _ihash_i < (h)->len; _ihash_i++) { \
+ if ((h)->bucket[_ihash_i] == NULL) { \
+ continue; \
+ } \
+ for (_ihash_j = 0; _ihash_j < (h)->bucket[_ihash_i]->len; \
+ _ihash_j++) { \
+ (hval) = (h)->bucket[_ihash_i]->kv[_ihash_j].val;
+
+#define IHASH_FOR_EACH_END }}} do {} while (0)
+
+#endif
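
Putting the declarations above together, a sketch of how the hash is meant to be used with string keys, roughly in the style of the chain_hash in hipac.c. It depends on the allocator and error macros from global.h/mode.h, so it is a sketch rather than a standalone program, and the key/value contents are made up:

#include "ihash.h"

static hipac_error demo(void)
{
	struct ihash *h;
	char *key;
	hipac_error err;

	/* 16 buckets to start with, mini_alloc for the bucket array,
	   grow once a bucket holds 4 elements on average */
	h = ihash_new(16, 1, 4, ihash_func_str, eq_str);
	if (!h)
		return HE_LOW_MEMORY;

	if ((err = ihash_insert(&h, "INPUT", val_to_ptr(1)))) {
		ihash_free(h);
		return err;
	}

	if (ihash_lookup_str(h, "INPUT") == NULL)
		printf("lookup failed\n");

	IHASH_FOR_EACH_KEY(h, key) {
		printf("chain: %s\n", key);
	IHASH_FOR_EACH_END;
	}

	ihash_free(h);
	return HE_OK;
}
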
diff -urN nf-hipac/kernel/Makefile nfhipac/kernel/Makefile
--- nf-hipac/kernel/Makefile 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/Makefile 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,24 @@
+#
+# Makefile for nf-hipac on top of IPv4.
+#
+LINUXPATH = /lib/modules/`uname -r`/build
+CURDIR = $(shell pwd)
+
+obj-m += nf_hipac.o
+nf_hipac-objs := ihash.o global.o rlp.o dimtree.o hipac.o nfhp_dev.o nfhp_proc.o nfhp_mod.o
+
+EXTRA_CFLAGS += -D SINGLE_PATH
+
+ifneq ($(ARCH), ia64)
+ifneq ($(ARCH), x86_64)
+EXTRA_CFLAGS += -D BIT32_ARCH
+endif
+endif
+
+all: mod
+
+mod:
+ $(MAKE) -C $(LINUXPATH) M=$(CURDIR) modules
+
+clean:
+ rm -rf *.o *.ko *.mod.c *.symvers *.mod.o .*.cmd .tmp_versions modules.order
diff -urN nf-hipac/kernel/mode.h nfhipac/kernel/mode.h
--- nf-hipac/kernel/mode.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/mode.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,236 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _MODE_H
+#define _MODE_H
+
+#include <stddef.h> // offsetof
+
+/* maximal number of bytes allocatable by mini_alloc */
+#define MINI_ALLOC_MAX 131072
+
+
+/*
+ * NEVER use big_alloc and big_free. Use hp_alloc and hp_free instead.
+ * The only exceptions to this rule are the implementations of hp_alloc,
+ * hp_realloc and hp_free.
+ *
+ * mini_alloc and mini_free can be used for small (<= MINI_ALLOC_MAX bytes)
+ * data structures if one wants to avoid the overhead of hp_alloc and hp_free
+ */
+#ifdef __KERNEL__
+#include
+#else
+#include
+#endif
+
+static inline unsigned
+big_alloc_size(unsigned size)
+{
+ return size == 0 ? 0 : (((size - 1) + PAGE_SIZE) & ~(PAGE_SIZE - 1));
+}
+
+
+#ifdef __KERNEL__
+/*
+ * Kernel space
+ */
+#include
+#include <linux/kernel.h> // ULONG_MAX
+#include <linux/smp.h> // smp_num_cpus, cpu_number_map, smp_processor_id
+#include <linux/rcupdate.h> // Read Copy Update: synchronize_rcu
+#include <linux/cache.h> // __cacheline_aligned
+#include <linux/netfilter.h> // NF_ACCEPT, NF_DROP
+#include
+#include
+#include
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#define IPT_ALIGN(s) XT_ALIGN(s)
+#endif
+
+#define assert(as) do {} while (0)
+#define printf(str, args...) printk(str , ## args)
+
+static inline unsigned
+mini_alloc_size(unsigned size)
+{
+ unsigned int s;
+#define CACHE(x) if (size <= x) { s = x; goto found;}
+#include <linux/kmalloc_sizes.h> /* expands to a list of CACHE(size) entries */
+ return 0;
+found:
+ return s;
+}
+
+/* for small amounts of memory only (up to 128 KB) */
+static inline void *
+mini_alloc(unsigned size)
+{
+ return vmalloc(size);
+}
+
+static inline void
+mini_free(void *p)
+{
+ vfree(p);
+}
+
+/* memory is allocated in amounts of multiples of PAGE_SIZE */
+static inline void *
+big_alloc(unsigned size)
+{
+ return vmalloc(size);
+}
+
+static inline void
+big_free(void *p)
+{
+ vfree(p);
+}
+
+/* dirty hack to make stuff work with uml (otherwise high_physmem and end_vm
+ are not defined) */
+#ifdef CONFIG_UML_NET
+# undef TOP
+# ifdef CONFIG_HOST_2G_2G
+# define TOP 0x80000000
+# else
+# define TOP 0xc0000000
+# endif
+# undef SIZE
+# define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
+# undef START
+# define START (TOP - SIZE)
+# undef VMALLOC_OFFSET
+# define VMALLOC_OFFSET (8 * 1024 * 1024)
+# undef VMALLOC_START
+# define VMALLOC_START (((unsigned long) (START + 32 * 1024 * 1024) + \
+ VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1))
+static unsigned long high_physmem = START + 32 * 1024 * 1024;
+static unsigned long end_vm = VMALLOC_START + 32 * 1024 * 1024;
+#endif /* CONFIG_UML_NET */
+
+
+
+
+#else /* __KERNEL__ */
+/*
+ * User space
+ */
+#include
+#if defined(__GLIBC__) && __GLIBC__ == 2
+# include
+#else /* libc5 */
+# include
+#endif
+#include
+#include
+#include
+#include <limits.h> // ULONG_MAX
+#include
+#include
+
+/* no assertions if not debugging */
+#ifndef DEBUG
+# undef assert
+# define assert(as) do {} while (0)
+#endif
+
+/* locking unnecessary in user space */
+#define synchronize_rcu(x) do {} while (0)
+
+/* printk compatibility */
+#define KERN_EMERG "KERN_EMERG: "
+#define KERN_ALERT "KERN_ALERT: "
+#define KERN_CRIT "KERN_CRIT: "
+#define KERN_ERR "KERN_ERR: "
+#define KERN_WARNING "KERN_WARNING: "
+#define KERN_NOTICE "KERN_NOTICE: "
+#define KERN_INFO "KERN_INFO: "
+#define KERN_DEBUG "KERN_DEBUG: "
+#define printk(str, args...) printf(str , ## args)
+
+/* netfilter verdict compatibility */
+#define NF_DROP 0
+#define NF_ACCEPT 1
+
+/* macro to annotate likely branch directions which results in the
+ blocks being reordered appropriately */
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+# define __builtin_expect(x, expected_value) (x)
+#endif
+#define likely(x) __builtin_expect((x), 1)
+#define unlikely(x) __builtin_expect((x), 0)
+
+static inline unsigned
+mini_alloc_size(unsigned size)
+{
+ unsigned int s;
+#define CACHE(x) if (size <= x) { s = x; goto found;}
+ CACHE(32);
+ CACHE(64);
+ CACHE(96);
+ CACHE(128);
+ CACHE(192);
+ CACHE(256);
+ CACHE(512);
+ CACHE(1024);
+ CACHE(2048);
+ CACHE(4096);
+ CACHE(8192);
+ CACHE(16384);
+ CACHE(32768);
+ CACHE(65536);
+ CACHE(131072);
+ return 0;
+found:
+ return s;
+}
+
+/* for small amounts of memory only (up to 128 KB) */
+static inline void *
+mini_alloc(unsigned size)
+{
+ return malloc(mini_alloc_size(size));
+}
+
+static inline void
+mini_free(void *p)
+{
+ free(p);
+}
+
+/* memory is allocated in amounts of multiples of PAGE_SIZE */
+static inline void *
+big_alloc(unsigned size)
+{
+ return malloc(big_alloc_size(size));
+}
+
+static inline void
+big_free(void *p)
+{
+ free(p);
+}
+
+#endif /* __KERNEL__ */
+
+#endif
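
The two size helpers in mode.h decide how allocations are rounded: mini_alloc_size() snaps the request to the kmalloc-style cache sizes listed by the CACHE() table, big_alloc_size() to whole pages. A standalone illustration of the page rounding (PAGE_SIZE is fixed at 4096 here purely for the example):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* same formula as big_alloc_size() in mode.h */
static unsigned big_alloc_size(unsigned size)
{
	return size == 0 ? 0 : (((size - 1) + PAGE_SIZE) & ~(PAGE_SIZE - 1));
}

int main(void)
{
	/* prints: 4096 4096 8192 */
	printf("%u %u %u\n", big_alloc_size(1), big_alloc_size(4096),
	       big_alloc_size(4097));
	return 0;
}
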
diff -urN nf-hipac/kernel/nfhp_com.h nfhipac/kernel/nfhp_com.h
--- nf-hipac/kernel/nfhp_com.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_com.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,294 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _NFHP_COM_H
+#define _NFHP_COM_H
+
+#ifdef __KERNEL__
+# include
+# include
+# include
+#endif
+#include
+#include
+#include "hipac.h"
+
+
+/* a similar line will hopefully make its way into netlink.h */
+#define NETLINK_NFHIPAC 26
+#define NLHP_PROTO NETLINK_NFHIPAC
+#define NLHP_TYPE 0xFADE
+
+/* dimension id's */
+enum {
+ DIMID_STATE,
+ DIMID_SRC_IP,
+ DIMID_DEST_IP,
+ DIMID_INIFACE,
+ DIMID_OUTIFACE,
+ DIMID_PROTO,
+ DIMID_FRAGMENT,
+ DIMID_DPORT,
+ DIMID_SPORT,
+ DIMID_SYN,
+ DIMID_ICMP_TYPE,
+ DIMID_TTL,
+ NUMBER_OF_DIM
+};
+
+/* bit types */
+#define BIT_STATE BIT_U16
+#define BIT_SRC_IP BIT_U32
+#define BIT_DEST_IP BIT_U32
+#define BIT_INIFACE BIT_U16
+#define BIT_OUTIFACE BIT_U16
+#define BIT_PROTO BIT_U16
+#define BIT_FRAGMENT BIT_U16
+#define BIT_DPORT BIT_U16
+#define BIT_SPORT BIT_U16
+#define BIT_SYN BIT_U16
+#define BIT_ICMP_TYPE BIT_U16
+#define BIT_TTL BIT_U16
+
+/* origin bits */
+#define NFHP_ORIGIN_INPUT 0x1
+#define NFHP_ORIGIN_FORWARD 0x2
+#define NFHP_ORIGIN_OUTPUT 0x4
+
+/* hipac_error and nfhipac_error together form the netlink error messages */
+typedef enum
+{
+ NFHE_INDEX = HE_NEXT_ERROR, // Unable to retrieve unused ifindex
+ NFHE_NOMSG = HE_NEXT_ERROR - 1, // Incorrect message format
+ NFHE_CMD = HE_NEXT_ERROR - 2, // Invalid command
+ NFHE_LABEL = HE_NEXT_ERROR - 3, // Empty chain label
+ NFHE_NLABEL = HE_NEXT_ERROR - 4, // Empty new chain label
+ NFHE_POLICY = HE_NEXT_ERROR - 5, // Invalid policy
+ NFHE_ACTION = HE_NEXT_ERROR - 6, // Invalid action
+ NFHE_NMCT = HE_NEXT_ERROR - 7, // Invalid native match count
+ NFHE_IEOFF = HE_NEXT_ERROR - 8, // Invalid target_offset/next_offset
+ // in ipt_entry
+ NFHE_SORT = HE_NEXT_ERROR - 9, // Native matches not sorted or
+ // dimid duplicate
+ NFHE_MINT = HE_NEXT_ERROR - 10, // Invalid interval in native match
+ NFHE_DEVA = HE_NEXT_ERROR - 11, // Native interface match but no
+ // corresponding interface name
+ NFHE_DEVB = HE_NEXT_ERROR - 12, // Interface name but no corres-
+ // ponding native interface match
+ NFHE_FRAG = HE_NEXT_ERROR - 13, // Invalid fragment match
+ NFHE_PROTO = HE_NEXT_ERROR - 14, // Invalid protocol match
+ NFHE_SYN = HE_NEXT_ERROR - 15, // Invalid syn match
+ NFHE_STATE = HE_NEXT_ERROR - 16, // Invalid state match
+ NFHE_TCP = HE_NEXT_ERROR - 17, // tcp match dependency failure
+ NFHE_TCPUDP = HE_NEXT_ERROR - 18, // tcp or udp match dependency failure
+ NFHE_ICMP = HE_NEXT_ERROR - 19, // icmp match dependency failure
+ NFHE_CMPMIS = HE_NEXT_ERROR - 20, // Missing cmp_len array
+ NFHE_CMPSH = HE_NEXT_ERROR - 21, // cmp_len array too short
+ NFHE_CMPLA = HE_NEXT_ERROR - 22, // cmp_len array contains a value
+ // larger than the corresponding
+ // ipt match/target size
+ NFHE_ORIGIN = HE_NEXT_ERROR - 23, // Illegal combination of matches
+ // (no valid origin)
+ NFHE_TOFF = HE_NEXT_ERROR - 29, // Invalid target_offset
+ NFHE_CHAINE = HE_NEXT_ERROR - 30, // Empty chain name
+ NFHE_CHAINL = HE_NEXT_ERROR - 31, // Chain name too long
+ NFHE_CT = HE_NEXT_ERROR - 32, // Kernel does not have support for
+ // connection tracking, please recompile
+ NFHE_CTHELP = HE_NEXT_ERROR - 33, // Unable to load connection tracking
+ // helper module (nf_hipac_cthelp.o)
+ NFHE_ILL = HE_NEXT_ERROR - 34, // Illegal condition
+ NFHE_IMPL = HE_NEXT_ERROR - 35, // Feature not yet implemented
+ NFHE_SYSOFF = HE_NEXT_ERROR - 36 // - offset for system errno's -
+} nfhipac_error;
+
+/* errno is positive */
+#define ERRNO_TO_NFHE(e) (NFHE_SYSOFF - e)
+#define NFHE_TO_ERRNO(e) (NFHE_SYSOFF - e)
+
+/* connection tracking states */
+typedef enum
+{
+ NFHP_STATE_ESTABLISHED,
+ NFHP_STATE_RELATED,
+ NFHP_STATE_NEW,
+ NFHP_STATE_INVALID,
+ NFHP_STATE_UNTRACKED,
+ NFHP_STATE_NUM_VALID = NFHP_STATE_INVALID
+} nfhp_state;
+
+/* netlink commands */
+#define CMD_NONE 0
+#define CMD_MIN 1
+#define CMD_APPEND 1
+#define CMD_INSERT 2
+#define CMD_DELETE_RULE 3
+#define CMD_DELETE_POS 4
+#define CMD_REPLACE 5
+#define CMD_FLUSH 6
+#define CMD_NEW_CHAIN 7
+#define CMD_DELETE_CHAIN 8
+#define CMD_RENAME_CHAIN 9
+#define CMD_SET_POLICY 10
+#define CMD_LIST 11
+#define CMD_MAX 11
+
+struct nfhp_rule
+{
+ char indev[IFNAMSIZ];
+ char outdev[IFNAMSIZ];
+ struct hipac_rule r;
+ struct hipac_match m[NUMBER_OF_DIM]; // == r.first_match
+ /* we cannot use aligned(__alignof__(u_int64_t)) instead of
+ aligned(8) because of incompatibilities in gcc versions */
+ struct ipt_entry e[0] __attribute__((aligned(8)));
+};
+
+struct nfhp_chain
+{
+ char label[HIPAC_CHAIN_NAME_MAX_LEN];
+ char newlabel[HIPAC_CHAIN_NAME_MAX_LEN];
+ u_int8_t policy;
+};
+
+/* universal macros which can be used for USER <-> KERNEL (both directions) */
+#define HAS_CHAIN_TARGET(r) ((r)->action == TARGET_CHAIN)
+
+/*
+ * netlink communication: USER -> KERNEL
+ */
+
+/* command sent to kernel; only the necessary parts (depending on the command
+ type) must be filled in;
+
+ this is what an nfhp_cmd really looks like:
+ --------------------------------------------
+ | nfhp_cmd struct |
+ |------------------------------------------|
+ | ipt_entry |
+ |------------------------------------------|
+ | ipt_entry_match 1 |
+ |------------------------------------------|
+ | . . . |
+ |------------------------------------------|
+ | ipt_entry_match m |
+ |------------------------------------------|
+ | ipt_entry_target |
+ | or |
+ | chain label |
+ |------------------------------------------|
+ | cmp_len array of size m |
+ | or m + 1 if ipt_entry_target available |
+ --------------------------------------------
+
+ ipt_entry, ipt_entry_matches, ipt_entry_target / chain label and cmp_len are
+ optional; here are the rules defining their presence:
+ 1) if the rule action is TARGET_EXEC there is an ipt_entry_target
+ 2) if the rule action is TARGET_CHAIN there is a chain label
+ 3) if there is an ipt_entry_match or ipt_entry_target or chain label there
+ is an ipt_entry
+ 4) if there is an ipt_entry and cmd is CMD_DELETE_RULE there is cmp_len
+
+ => the smallest command simply consists of the nfhp_cmd struct
+
+ struct nfhp_cmd contains an embedded struct hipac_rule; set its member
+ match_offset to a value > 0 if there is at least one ipt_entry_match;
+ otherwise it must be 0; you don't have to specify the following members:
+ size, origin, target_offset
+
+
+*/
+struct nfhp_cmd
+{
+ u_int32_t cmd;
+ struct nfhp_chain chain;
+ struct nfhp_rule rule;
+};
+
+/* macros to access nfhp_cmd; hr is a pointer to the embedded hipac_rule */
+#define NFHP_RULE(hr) ((struct nfhp_rule *) \
+ ((char *) (hr) - (unsigned long) \
+ (&((struct nfhp_rule *) 0)->r)))
+
+
+
+/*
+ * netlink communication: KERNEL -> USER
+ */
+
+/*
+ in reply to a CMD_LIST command the kernel sends a series of packets to
+ the userspace; each packet is filled as much as possible so that the
+ number of packets being transferred is reduced to a minimum;
+ if an error occurs at some point during the transmission, a packet
+ containing the error number (int32_t) is sent;
+ the data sent to the userspace is organized in the following way:
+ |struct nfhp_list_chain (chain 1)|rule 1|padding|...|rule n_1|padding|
+ | . . . . |
+ |struct nfhp_list_chain (chain k)|rule 1|padding|...|rule n_k|padding|
+ the rules are of the type struct nfhp_rule;
+
+ this is what an nfhp_list_rule really looks like:
+ --------------------------------------------
+ | nfhp_list_rule struct |
+ |------------------------------------------|
+ | hipac_match 1 |
+ |------------------------------------------|
+ | . . . |
+ |------------------------------------------|
+ | hipac_match n |
+ |------------------------------------------|
+ | ipt_entry_match 1 |
+ |------------------------------------------|
+ | . . . |
+ |------------------------------------------|
+ | ipt_entry_match m |
+ |------------------------------------------|
+ | ipt_entry_target |
+ | or |
+ | chain label |
+ --------------------------------------------
+
+ the number of hipac_matches depends on native_mct (member of hipac_rule);
+ there is neither ipt_entry nor cmp_len;
+
+ IMPORTANT: - there might be padding between two consecutive rules
+ in order to meet the alignment requirements for rules which
+ contain 64 bit members; so you have to use the IPT_ALIGN
+ macro to jump to the next rule; note that there is no padding
+ after a chain because it contains 64 bit members which
+ enforce the strictest alignment on the system
+*/
+struct nfhp_list_chain
+{
+ char label[HIPAC_CHAIN_NAME_MAX_LEN];
+ u_int8_t policy;
+ u_int32_t rule_num;
+};
+
+struct nfhp_list_rule
+{
+ char indev[IFNAMSIZ];
+ char outdev[IFNAMSIZ];
+ struct hipac_rule r;
+};
+
+/* these macros together with the universal macros can be used to access
+ nfhp_list_rule */
+#endif
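
The comments above pin down the wire format between user space and the module: netlink protocol 26 (NLHP_PROTO), message type 0xFADE (NLHP_TYPE) and, as nfhp_mod.c later enforces in nlhp_chk_user_skb(), the NLM_F_REQUEST and NLM_F_ACK flags plus a non-zero pid. The sketch below is my own illustration, not part of the patch; the 512-byte payload is only a stand-in for a properly filled struct nfhp_cmd (which needs nfhp_com.h and hipac.h to build):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#define NETLINK_NFHIPAC 26      /* NLHP_PROTO above */
#define NLHP_TYPE       0xFADE

int main(void)
{
        struct {
                struct nlmsghdr nlh;
                char payload[512];  /* stand-in for a filled-in struct nfhp_cmd */
        } req;
        struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
        int fd;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.payload));
        req.nlh.nlmsg_type  = NLHP_TYPE;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; /* required by nlhp_chk_user_skb() */
        req.nlh.nlmsg_pid   = getpid();                  /* must be non-zero */

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NFHIPAC);
        if (fd < 0) {
                perror("socket");  /* module not loaded, or protocol number taken */
                return 1;
        }
        if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
                   (struct sockaddr *) &kernel, sizeof(kernel)) < 0)
                perror("sendto");
        close(fd);
        return 0;
}
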
diff -urN nf-hipac/kernel/nfhp_dev.c nfhipac/kernel/nfhp_dev.c
--- nf-hipac/kernel/nfhp_dev.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_dev.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,313 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include
+#include
+#include
+#include
+#include "nfhp_com.h"
+#include "nfhp_dev.h"
+
+
+#define IFNAME_MAP_INIT_LEN 31
+
+struct ifname_map_t
+{
+ u32 len;
+ u32 size;
+ struct
+ {
+ char *ifname;
+ u16 vindex;
+ } map[0];
+};
+
+static struct ifname_map_t *ifname_map;
+static char (*ifnames)[IFNAMSIZ];
+struct nf_hipac_dev_ifindex_map_t nf_hipac_dev_ifindex_map
+__attribute__((aligned(SMP_CACHE_BYTES)));
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+static spinlock_t dev_lock = SPIN_LOCK_UNLOCKED;
+#else
+static spinlock_t dev_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+#endif
+
+static int
+init_data(void)
+{
+ ifname_map = kmalloc(sizeof(*ifname_map) + IFNAME_MAP_INIT_LEN *
+ sizeof(*ifname_map->map), GFP_KERNEL);
+ if (ifname_map == NULL) {
+ return -ENOMEM;
+ }
+
+ ifnames = kmalloc(IFNAME_MAP_INIT_LEN * sizeof(*ifnames), GFP_KERNEL);
+ if (ifnames == NULL) {
+ kfree(ifname_map);
+ return -ENOMEM;
+ }
+ memset(&nf_hipac_dev_ifindex_map, 0, sizeof(nf_hipac_dev_ifindex_map));
+ memset(ifname_map, 0, sizeof(*ifname_map) + IFNAME_MAP_INIT_LEN *
+ sizeof(*ifname_map->map));
+ memset(ifnames, 0, IFNAME_MAP_INIT_LEN * sizeof(*ifnames));
+ ifname_map->size = IFNAME_MAP_INIT_LEN;
+ return 0;
+}
+
+static void
+free_data(void)
+{
+ if (ifname_map != NULL) {
+ kfree(ifname_map);
+ ifname_map = NULL;
+ }
+
+ if (ifnames != NULL) {
+ kfree(ifnames);
+ ifnames = NULL;
+ }
+}
+
+static void
+ifindex_map_add_replace(u16 ifindex, u16 vindex)
+{
+ u16 i;
+ for (i = 0; i < nf_hipac_dev_ifindex_map.len; i++) {
+ if (nf_hipac_dev_ifindex_map.map[i].ifindex == ifindex) {
+ nf_hipac_dev_ifindex_map.map[i].vindex = vindex;
+ return;
+ }
+ }
+ for (i = 0; i < nf_hipac_dev_ifindex_map.len; i++) {
+ if (nf_hipac_dev_ifindex_map.map[i].ifindex == 0) {
+ nf_hipac_dev_ifindex_map.map[i].ifindex = ifindex;
+ nf_hipac_dev_ifindex_map.map[i].vindex = vindex;
+ return;
+ }
+ }
+ if (nf_hipac_dev_ifindex_map.len < NF_HIPAC_MAX_UP_INTERFACES) {
+ nf_hipac_dev_ifindex_map.map[nf_hipac_dev_ifindex_map.len]
+ .ifindex = ifindex;
+ nf_hipac_dev_ifindex_map.map[nf_hipac_dev_ifindex_map.len]
+ .vindex = vindex;
+ nf_hipac_dev_ifindex_map.len++;
+ } else {
+ printk(KERN_ERR "NF_HiPAC: too much interfaces UP at the "
+ "same time. Please increase NF_HIPAC_MAX_UP_INTERFACES "
+ "in nf_hipac_dev.h and recompile!");
+ }
+ return;
+}
+
+static void
+ifindex_map_del(u16 ifindex)
+{
+ u16 i;
+ for (i = 0; i < nf_hipac_dev_ifindex_map.len; i++) {
+ if (nf_hipac_dev_ifindex_map.map[i].ifindex == ifindex) {
+ nf_hipac_dev_ifindex_map.map[i].ifindex = 0;
+ nf_hipac_dev_ifindex_map.map[i].vindex = 0;
+ return;
+ }
+ }
+ return;
+}
+
+int
+ifname_map_lookup_vindex(const char *ifname)
+{
+ u16 pos;
+ int cmp;
+ u32 start = 1;
+ u32 stop = ifname_map->len;
+
+ while (stop >= start) {
+ pos = ((start + stop) >> 1) - 1;
+ cmp = strcmp(ifname_map->map[pos].ifname, ifname);
+ if (cmp < 0) {
+ start = pos + 2;
+ } else if (cmp > 0) {
+ stop = pos;
+ } else {
+ return ifname_map->map[pos].vindex;
+ }
+ }
+ return -1;
+}
+
+int
+nf_hipac_dev_lookup_ifname(int vindex, char ifname[])
+{
+ if (vindex < 1 || vindex > ifname_map->len)
+ return -1;
+ strlcpy(ifname, ifnames[vindex - 1], IFNAMSIZ);
+ return 0;
+}
+
+int
+nf_hipac_dev_get_vindex(const char *ifname)
+{
+ u32 max = 0;
+ u32 start = 1;
+ u16 pos;
+ u32 stop;
+ int cmp;
+ struct net_device *dev;
+
+ if (unlikely(ifname_map->len == 0)) {
+ strlcpy(ifnames[0], ifname, sizeof(*ifnames));
+ dev = dev_get_by_name(&init_net, ifname);
+ spin_lock_bh(&dev_lock);
+ ifname_map->len = 1;
+ ifname_map->map[0].ifname = ifnames[0];
+ ifname_map->map[0].vindex = 1;
+ if (dev) {
+ if (dev->flags & IFF_UP)
+ ifindex_map_add_replace(dev->ifindex, 1);
+ dev_put(dev);
+ }
+ spin_unlock_bh(&dev_lock);
+ return 1;
+ }
+
+ stop = ifname_map->len;
+ while (stop >= start) {
+ pos = ((start + stop) >> 1) - 1;
+ cmp = strcmp(ifname_map->map[pos].ifname, ifname);
+ if (cmp < 0) {
+ start = pos + 2;
+ } else if (cmp > 0) {
+ stop = pos;
+ max = pos + 1;
+ } else {
+ return ifname_map->map[pos].vindex;
+ }
+ }
+ if (max == 0) {
+ /* max has not been touched (otherwise it must be >= 1)
+ => new ifname is "maximal" */
+ pos = ifname_map->len;
+ } else {
+ pos = max - 1;
+ }
+
+ if (ifname_map->len == 65535) {
+ return NFHE_INDEX;
+ }
+
+ /* new vindex required -> do reallocations first if necessary */
+ if (unlikely(ifname_map->len == ifname_map->size)) {
+ u32 newsize = ((ifname_map->size + 1) << 1) - 1;
+ struct ifname_map_t *new_ifname_map;
+ char (*new_ifnames)[IFNAMSIZ];
+ new_ifname_map = kmalloc(sizeof(*new_ifname_map) + newsize *
+ sizeof(*new_ifname_map->map),
+ GFP_KERNEL);
+ if (new_ifname_map == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ new_ifnames = kmalloc(newsize * sizeof(*new_ifnames),
+ GFP_KERNEL);
+ if (new_ifnames == NULL) {
+ kfree(new_ifname_map);
+ return HE_LOW_MEMORY;
+ }
+ memcpy(new_ifname_map, ifname_map, sizeof(*new_ifname_map) +
+ ifname_map->size * sizeof(*new_ifname_map->map));
+ new_ifname_map->size = newsize;
+ memcpy(new_ifnames, ifnames,
+ ifname_map->size * sizeof(*new_ifnames));
+ strlcpy(new_ifnames[ifname_map->len], ifname,
+ sizeof(*new_ifnames));
+ dev = dev_get_by_name(&init_net, ifname);
+ spin_lock_bh(&dev_lock);
+ kfree(ifname_map);
+ kfree(ifnames);
+ ifname_map = new_ifname_map;
+ ifnames = new_ifnames;
+ } else {
+ strlcpy(ifnames[ifname_map->len], ifname, sizeof(*ifnames));
+ dev = dev_get_by_name(&init_net, ifname);
+ spin_lock_bh(&dev_lock);
+ }
+
+ if (pos < ifname_map->len) {
+ memmove(&ifname_map->map[pos + 1], &ifname_map->map[pos],
+ (ifname_map->len - pos) * sizeof(*ifname_map->map));
+ }
+ ifname_map->map[pos].ifname = ifnames[ifname_map->len];
+ ifname_map->map[pos].vindex = ++ifname_map->len;
+ if (dev) {
+ if (dev->flags & IFF_UP)
+ ifindex_map_add_replace(dev->ifindex, ifname_map->len);
+ dev_put(dev);
+ }
+ spin_unlock_bh(&dev_lock);
+ return ifname_map->len;
+}
+
+static int
+nf_hipac_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ int vindex;
+ struct net_device *dev = ptr;
+ switch (event) {
+ case NETDEV_UP:
+ spin_lock_bh(&dev_lock);
+ vindex = ifname_map_lookup_vindex(dev->name);
+ if (vindex > 0) {
+ // interface is in ruleset => add to ifindex_map
+ ifindex_map_add_replace(dev->ifindex, vindex);
+ }
+ spin_unlock_bh(&dev_lock);
+ break;
+
+ case NETDEV_DOWN:
+ spin_lock_bh(&dev_lock);
+ ifindex_map_del(dev->ifindex);
+ spin_unlock_bh(&dev_lock);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_hipac_dev_notifier = {
+ .notifier_call = nf_hipac_dev_event,
+};
+
+int
+nf_hipac_dev_init(void)
+{
+ int stat;
+
+ stat = init_data();
+ if (stat < 0) {
+ return stat;
+ }
+ stat = register_netdevice_notifier(&nf_hipac_dev_notifier);
+ if (stat < 0) {
+ free_data();
+ return stat;
+ }
+ return 0;
+}
+
+void
+nf_hipac_dev_exit(void)
+{
+ unregister_netdevice_notifier(&nf_hipac_dev_notifier);
+ free_data();
+}
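
ifname_map_lookup_vindex() and nf_hipac_dev_get_vindex() above keep the name-to-vindex map sorted by interface name and search it with a 1-based binary search, which is why the index arithmetic looks shifted by one (start = pos + 2, stop = pos). The standalone sketch below is my own illustration, not part of the patch; it mirrors that search over a plain C array:

#include <stdio.h>
#include <string.h>

struct entry { const char *ifname; unsigned short vindex; };

static int lookup_vindex(const struct entry *map, unsigned len, const char *name)
{
        unsigned start = 1, stop = len, pos;
        int cmp;

        while (stop >= start) {
                pos = ((start + stop) >> 1) - 1;   /* back to a 0-based index */
                cmp = strcmp(map[pos].ifname, name);
                if (cmp < 0)
                        start = pos + 2;           /* continue right of pos */
                else if (cmp > 0)
                        stop = pos;                /* continue left of pos */
                else
                        return map[pos].vindex;
        }
        return -1;                                 /* name not in the ruleset */
}

int main(void)
{
        /* sorted by name, exactly as nf_hipac_dev_get_vindex() maintains it */
        const struct entry map[] = {
                { "eth0", 2 }, { "eth1", 1 }, { "lo", 3 }
        };

        printf("eth1 -> %d\n", lookup_vindex(map, 3, "eth1"));  /* 1 */
        printf("ppp0 -> %d\n", lookup_vindex(map, 3, "ppp0"));  /* -1 */
        return 0;
}
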
diff -urN nf-hipac/kernel/nfhp_dev.h nfhipac/kernel/nfhp_dev.h
--- nf-hipac/kernel/nfhp_dev.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_dev.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,66 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _NF_HIPAC_DEV_H
+#define _NF_HIPAC_DEV_H
+
+#define NF_HIPAC_MAX_UP_INTERFACES 255
+
+struct nf_hipac_dev_ifindex_map_t
+{
+ u16 len;
+ struct
+ {
+ u16 ifindex;
+ u16 vindex;
+ } map[NF_HIPAC_MAX_UP_INTERFACES];
+};
+
+/* mapping from interface index to virtual interface index */
+extern struct nf_hipac_dev_ifindex_map_t nf_hipac_dev_ifindex_map;
+
+/* call init during module initialization; if something fails a negative
+ errno is returned, otherwise 0 is returned */
+int
+nf_hipac_dev_init(void);
+
+/* call exit during module finalization */
+void
+nf_hipac_dev_exit(void);
+
+/* copies the device name corresponding to vindex to ifname which should
+ be at least IFNAMSIZ bytes long and returns 0;
+ if vindex cannot be found a value < 0 is returned */
+int
+nf_hipac_dev_lookup_ifname(int vindex, char ifname[]);
+
+/* return the corresponding virtual interface index if the interface is
+ already known; otherwise the interface is added to the list of known
+ non-existing interfaces and a new virtual interface index is returned;
+ if something fails a nfhipac_error is returned */
+int
+nf_hipac_dev_get_vindex(const char *ifname);
+
+/* return virtual interface index corresponding to ifindex */
+static inline u16
+nf_hipac_dev_ifindex_to_vindex(u16 ifindex)
+{
+ u16 i;
+ for (i = 0; i < nf_hipac_dev_ifindex_map.len; i++)
+ if (nf_hipac_dev_ifindex_map.map[i].ifindex == ifindex)
+ return nf_hipac_dev_ifindex_map.map[i].vindex;
+ return 0;
+}
+
+#endif
diff -urN nf-hipac/kernel/nfhp_mod.c nfhipac/kernel/nfhp_mod.c
--- nf-hipac/kernel/nfhp_mod.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_mod.c 2014-11-21 12:51:25.000000000 +0800
@@ -0,0 +1,1181 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "nfhp_mod.h"
+#include "nfhp_com.h"
+#include "nfhp_dev.h"
+#include "nfhp_proc.h"
+#include "hipac.h"
+
+/* hook match functions */
+static nf_hookfn input_match;
+static nf_hookfn forward_match;
+static nf_hookfn output_match;
+
+/* hipac data structures for INPUT, FORWARD and OUTPUT hook plus
+ their corresponding netfilter ops */
+void *hipac_input = NULL;
+void *hipac_forward = NULL;
+void *hipac_output = NULL;
+struct nf_hook_ops input_op =
+{
+ .hook = input_match,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_FILTER - 1,
+};
+struct nf_hook_ops forward_op =
+{
+ .hook = forward_match,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP_PRI_FILTER - 1,
+};
+struct nf_hook_ops output_op =
+{
+ .hook = output_match,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_FILTER - 1,
+};
+
+/* used to serialize hipac modifications caused by netlink commands and
+ the interface handling module */
+static DEFINE_MUTEX(nfhp_mutex);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+DECLARE_MUTEX(nlhp_lock);
+#else
+DEFINE_SEMAPHORE(nlhp_lock);
+#endif
+
+static struct sock *nfhp_sock = NULL;
+
+/* latest hipac_get_chain_info snapshot */
+struct list_info
+{
+ struct hipac_chain_info *inf;
+ u32 len;
+};
+static struct list_info linfo = {NULL, 0};
+
+struct packet
+{
+ int hook;
+ struct sk_buff **skbuff;
+ const struct net_device *indev, *outdev;
+};
+
+/*
+ * dimension extractor functions
+ */
+
+static u32
+get_state(const void *pkt, int *hotdrop)
+{
+ return 0;
+}
+
+static u32
+get_src_ip(const void *pkt, int *hotdrop)
+{
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ return ntohl(iph->saddr);
+}
+
+static u32
+get_dst_ip(const void *pkt, int *hotdrop)
+{
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ return ntohl(iph->daddr);
+}
+
+static u32
+get_iniface(const void *pkt, int *hotdrop)
+{
+ return nf_hipac_dev_ifindex_to_vindex(((struct packet*) pkt)
+ ->indev->ifindex);
+}
+
+static u32
+get_outiface(const void *pkt, int *hotdrop)
+{
+ return nf_hipac_dev_ifindex_to_vindex(((struct packet*) pkt)
+ ->outdev->ifindex);
+}
+
+static u32
+get_proto(const void *pkt, int *hotdrop)
+{
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ return iph->protocol;
+}
+
+static u32
+get_fragment(const void *pkt, int *hotdrop)
+{
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ int offset = ntohs(iph->frag_off) & IP_OFFSET;
+ if (unlikely(offset)) {
+ if (unlikely(offset == 1 &&
+ iph->protocol == IPPROTO_TCP)) {
+ printk(KERN_NOTICE "Dropping evil TCP offset=1 "
+ "fragment.\n");
+ *hotdrop = 1;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static u32
+get_dport(const void *pkt, int *hotdrop)
+{
+ struct udphdr _udph, *uh;
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ uh = skb_header_pointer(skb, iph->ihl*4,
+ sizeof(_udph), &_udph);
+ if (unlikely(uh == NULL)) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ *hotdrop = 1;
+ return 0;
+ }
+ return ntohs(uh->dest);
+}
+
+static u32
+get_sport(const void *pkt, int *hotdrop)
+{
+ struct udphdr _udph, *uh;
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ uh = skb_header_pointer(skb, iph->ihl*4,
+ sizeof(_udph), &_udph);
+ if (unlikely(uh == NULL)) {
+ *hotdrop = 1;
+ return 0;
+ }
+ return ntohs(uh->source);
+}
+
+static u32
+get_syn(const void *pkt, int *hotdrop)
+{
+ struct tcphdr _tcph, *th;
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ th = skb_header_pointer(skb, iph->ihl*4,
+ sizeof(_tcph), &_tcph);
+ if (unlikely(th == NULL)) {
+ *hotdrop = 1;
+ return 0;
+ }
+ return !(th->syn && !th->ack && !th->fin && !th->rst);
+}
+
+static u32
+get_icmptypes(const void *pkt, int *hotdrop)
+{
+ struct icmphdr _icmph, *ic;
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ ic = skb_header_pointer(skb, iph->ihl*4,
+ sizeof(_icmph), &_icmph);
+ if (unlikely(ic == NULL)) {
+ *hotdrop = 1;
+ return 0;
+ }
+ return (ic->type << 8) + ic->code;
+}
+
+static u32
+get_ttl(const void *pkt, int *hotdrop)
+{
+ const struct sk_buff *skb = *((struct packet *) pkt)->skbuff;
+ struct iphdr *iph = ip_hdr(skb);
+ return iph->ttl;
+}
+
+
+/*
+ * conntrack dependency management
+ */
+
+int
+nfhp_register_cthelp(struct module *cthelp)
+{
+ return 0;
+}
+
+void
+nfhp_unregister_cthelp(struct module *cthelp)
+{
+}
+
+static inline int
+cthelp_use(void)
+{
+ return NFHE_CT;
+}
+
+static inline void
+cthelp_unuse(void)
+{
+}
+
+/*
+ * functions and data structures necessary for hipac initialization
+ */
+
+/* dimension id to bit type mapping */
+static const u8 dim2btype[] =
+{
+ [DIMID_STATE] = BIT_STATE,
+ [DIMID_SRC_IP] = BIT_SRC_IP,
+ [DIMID_DEST_IP] = BIT_DEST_IP,
+ [DIMID_INIFACE] = BIT_INIFACE,
+ [DIMID_OUTIFACE] = BIT_OUTIFACE,
+ [DIMID_PROTO] = BIT_PROTO,
+ [DIMID_FRAGMENT] = BIT_FRAGMENT,
+ [DIMID_DPORT] = BIT_DPORT,
+ [DIMID_SPORT] = BIT_SPORT,
+ [DIMID_SYN] = BIT_SYN,
+ [DIMID_ICMP_TYPE] = BIT_ICMP_TYPE,
+ [DIMID_TTL] = BIT_TTL
+};
+
+/* dimension extractor functions */
+static const hipac_extract_t extract[] =
+{
+ [DIMID_STATE] = get_state,
+ [DIMID_SRC_IP] = get_src_ip,
+ [DIMID_DEST_IP] = get_dst_ip,
+ [DIMID_INIFACE] = get_iniface,
+ [DIMID_OUTIFACE] = get_outiface,
+ [DIMID_PROTO] = get_proto,
+ [DIMID_FRAGMENT] = get_fragment,
+ [DIMID_DPORT] = get_dport,
+ [DIMID_SPORT] = get_sport,
+ [DIMID_SYN] = get_syn,
+ [DIMID_ICMP_TYPE] = get_icmptypes,
+ [DIMID_TTL] = get_ttl,
+};
+
+/* iptables_match executor */
+static hipac_match_t
+hipac_match_exec(const void *packet, void *first_match, void *end)
+{
+ return MATCH_NO;
+}
+
+/* iptables_target executor */
+static hipac_target_t
+hipac_target_exec(const void *packet, void *target)
+{
+ return TARGET_NONE;
+}
+
+/* equality test - rnl is the hipac_rule in netlink format which implies
+ that it contains ipt_entry and cmp_len if the rule has an ipt_entry_match
+ or ipt_entry_target or chain label; rhp is in hipac format which means
+ that it does not contain ipt_entry and cmp_len */
+static int
+hipac_eq_exec(const struct hipac_rule *rnl, const struct hipac_rule *rhp)
+{
+
+ if (rnl == rhp) {
+ printk(KERN_ERR "%s: rnl == rhp error\n", __FUNCTION__);
+ return 0;
+ }
+ if (rnl == NULL || rhp == NULL || rnl->size != rhp->size ||
+ rnl->native_mct != rhp->native_mct ||
+ memcmp(rnl->cmp_start, rhp->cmp_start,
+ sizeof(*rnl) - offsetof(struct hipac_rule, cmp_start) +
+ rnl->native_mct * sizeof(*rnl->first_match))) {
+ return 0;
+ }
+ return 1;
+}
+
+/* r is constructed by copying rnl to the exclusion of ipt_entry and
+ cmp_len (if present); rnl->size already states the size of r _but_
+ rnl may be smaller than rnl->size if it has a chain target */
+static void
+hipac_copy_constructor(const struct hipac_rule *rnl, struct hipac_rule *r)
+{
+ memcpy(r, rnl, rnl->size);
+}
+
+/* destructor for iptables matches/target */
+static void
+hipac_destroy_exec(struct hipac_rule *r)
+{
+ int i;
+
+ if (r == NULL) {
+ return;
+ }
+ for (i = 0; i < r->native_mct &&
+ r->first_match[i].dimid < DIMID_STATE; i++);
+ if (i < r->native_mct && r->first_match[i].dimid == DIMID_STATE) {
+ cthelp_unuse();
+ }
+}
+
+/* destructor for iptables matches/target (rnl is the hipac_rule in
+ netlink format) */
+static void
+hipac_destroy_exec_nl(struct hipac_rule *rnl)
+{
+ int i;
+
+ if (rnl == NULL) {
+ return;
+ }
+ for (i = 0; i < rnl->native_mct &&
+ rnl->first_match[i].dimid < DIMID_STATE; i++);
+ if (i < rnl->native_mct && rnl->first_match[i].dimid == DIMID_STATE) {
+ cthelp_unuse();
+ }
+}
+
+static unsigned int
+input_match(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn) (struct sk_buff *))
+{
+ const struct packet pkt = {hooknum, &skb, in, out};
+ return hipac_match(hipac_input, &pkt);
+}
+
+static unsigned int
+forward_match(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn) (struct sk_buff *))
+{
+ const struct packet pkt = {hooknum, &skb, in, out};
+ return hipac_match(hipac_forward, &pkt);
+}
+
+static unsigned int
+output_match(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn) (struct sk_buff *))
+{
+ const struct packet pkt = {hooknum, &skb, in, out};
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* root is playing with raw sockets. */
+ if (unlikely(skb->len < sizeof(struct iphdr) ||
+ (iph->ihl << 2) < sizeof(struct iphdr))) {
+ return NF_ACCEPT;
+ }
+ return hipac_match(hipac_output, &pkt);
+}
+
+
+/*
+ * kernel-user netlink communication
+ */
+
+static inline void *
+nlhp_list_rule(struct nfhp_list_rule *r, struct hipac_rule *rule, int *len)
+{
+ int size = IPT_ALIGN(offsetof(struct nfhp_list_rule, r) + rule->size);
+ u32 i;
+
+ if (*len < size) {
+ return NULL;
+ }
+ r->indev[0] = '\0';
+ r->outdev[0] = '\0';
+ memcpy(&r->r, rule, rule->size);
+
+ /* fill in interface names if necessary */
+ for (i = 0; i < r->r.native_mct; i++) {
+ switch (r->r.first_match[i].dimid) {
+ case DIMID_INIFACE:
+ if (nf_hipac_dev_lookup_ifname(
+ r->r.first_match[i].left,
+ r->indev) < 0) {
+ printk(KERN_ERR "%s: interface name look"
+ "up failed\n", __FUNCTION__);
+ }
+ break;
+ case DIMID_OUTIFACE:
+ if (nf_hipac_dev_lookup_ifname(
+ r->r.first_match[i].left,
+ r->outdev) < 0) {
+ printk(KERN_ERR "%s: interface name look"
+ "up failed\n", __FUNCTION__);
+ }
+ break;
+ }
+ }
+
+ *len -= size;
+ return (char *) r + size;
+}
+
+static inline void *
+nlhp_list_chain(struct nfhp_list_chain *c, int pos, int *len)
+{
+ if (*len < sizeof(*c)) {
+ return NULL;
+ }
+ strncpy(c->label, linfo.inf[pos].label, sizeof(c->label));
+ c->label[sizeof(c->label) - 1] = '\0';
+ c->policy = linfo.inf[pos].policy;
+ c->rule_num = linfo.inf[pos].rule_num;
+ *len -= sizeof(*c);
+ return c + 1;
+}
+
+static inline int
+nlhp_list_next_rule(struct hipac_rule *prev, struct hipac_rule **rule, int pos)
+{
+ int stat;
+
+ stat = hipac_get_next_rule(&linfo.inf[pos], prev, rule);
+ switch (stat) {
+ case HE_OK:
+ return 0;
+ case HE_RULE_NOT_EXISTENT:
+ *rule = NULL;
+ return 0;
+ default:
+ if (unlikely(stat > 0)) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: hipac_get_next_rule returned "
+ "status > 0\n", __FUNCTION__);
+ stat = -stat;
+ }
+ return stat;
+ }
+}
+
+/* callback function for CMD_LIST command */
+static int
+nlhp_list(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ static u32 pos;
+ static struct hipac_rule *rule;
+ struct nlmsghdr *nlh;
+ int len, total, stat;
+ void *data;
+
+ total = skb_tailroom(skb) - NLMSG_SPACE(0);
+ switch (cb->args[0]) {
+ case 0:
+ /* first callback in the series */
+ pos = 0;
+ rule = NULL;
+ data = NLMSG_DATA(skb->data);
+ len = total;
+ cb->args[0] = 1;
+ break;
+ case 1:
+ /* pos, rule represent the current state */
+ data = NLMSG_DATA(skb->data);
+ len = total;
+ break;
+ default:
+ return 0;
+ }
+
+ while (1) {
+ if (rule == NULL) {
+ /* send chain info */
+ data = nlhp_list_chain(data, pos, &len);
+ if (data == NULL) {
+ /* skb full - chain sent next time */
+ break;
+ }
+ stat = nlhp_list_next_rule(NULL, &rule, pos);
+ if (stat < 0) {
+ /* rule listing aborted due to error */
+ return stat;
+ }
+ } else {
+ /* send next rule */
+ data = nlhp_list_rule(data, rule, &len);
+ if (data == NULL) {
+ /* skb full - rule sent next time */
+ break;
+ }
+ stat = nlhp_list_next_rule(rule, &rule, pos);
+ if (stat < 0) {
+ /* rule listing aborted due to error */
+ return stat;
+ }
+ }
+ if (rule == NULL) {
+ if (++pos == linfo.len) {
+ /* we are done */
+ cb->args[0] = 2;
+ break;
+ }
+ }
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+ NLHP_TYPE, total - len);
+#else
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ NLHP_TYPE, total - len, 0);
+#endif
+ nlh->nlmsg_flags |= NLM_F_MULTI;
+ return NLMSG_SPACE(total - len);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+nlmsg_failure:
+ skb_trim(skb, (unsigned int)((unsigned char *)skb->tail - skb->data));
+ return NFHE_ILL;
+#endif
+}
+
+static int
+nlhp_done(struct netlink_callback *cb)
+{
+ return 0;
+}
+
+static void
+nlhp_send_reply(struct sk_buff *skb, struct nlmsghdr *nlh, int err)
+{
+ struct sk_buff *r_skb;
+ struct nlmsghdr *r_nlh;
+
+ r_skb = alloc_skb(NLMSG_SPACE(sizeof(int32_t)), GFP_KERNEL);
+ if (r_skb == NULL) {
+ return;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ r_nlh = NLMSG_PUT(r_skb, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
+ NLMSG_ERROR, sizeof(int32_t));
+#else
+ r_nlh = nlmsg_put(r_skb, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ NLMSG_ERROR, sizeof(int32_t), 0);
+#endif
+ *(int32_t *) NLMSG_DATA(r_nlh) = err;
+ if (!NLMSG_OK(r_nlh, NLMSG_LENGTH(sizeof(int32_t)))) {
+ printk(KERN_ERR "netlink message not ok\n");
+ return;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ if (netlink_unicast(nfhp_sock, r_skb, NETLINK_CB(skb).pid,
+ MSG_DONTWAIT) <= 0) {
+#else
+ if (netlink_unicast(nfhp_sock, r_skb, NETLINK_CB(skb).portid,
+ MSG_DONTWAIT) <= 0) {
+#endif
+ printk(KERN_ERR "netlink_unicast failed\n");
+ return;
+ }
+ return;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+nlmsg_failure:
+ printk(KERN_ERR "NLMSG_PUT failed\n");
+ kfree(r_skb);
+#endif
+}
+
+static int
+do_cmd(struct sk_buff *skb, int msg_len);
+
+static inline int
+nlhp_chk_user_skb(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh = (struct nlmsghdr *) skb->data;
+
+ if (skb->len < sizeof(struct nlmsghdr) ||
+ nlh->nlmsg_len < sizeof(struct nlmsghdr) ||
+ skb->len < nlh->nlmsg_len ||
+ nlh->nlmsg_pid <= 0 ||
+ nlh->nlmsg_type != NLHP_TYPE ||
+ nlh->nlmsg_flags & NLM_F_MULTI ||
+ !(nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ !(nlh->nlmsg_flags & NLM_F_ACK)) {
+ nlhp_send_reply(skb, nlh, NFHE_NOMSG);
+ return 1;
+ }
+ if (nlh->nlmsg_flags & MSG_TRUNC) {
+ nlhp_send_reply(skb, nlh, ERRNO_TO_NFHE(ECOMM));
+ return 1;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
+#else
+ if (0) {
+#endif
+ nlhp_send_reply(skb, nlh, ERRNO_TO_NFHE(EPERM));
+ return 1;
+ }
+ return do_cmd(skb, skb->len - NLMSG_LENGTH(0));
+}
+
+static void
+nlhp_data_ready(struct sk_buff *skb)
+{
+ mutex_lock(&nfhp_mutex);
+ nlhp_chk_user_skb(skb);
+ mutex_unlock(&nfhp_mutex);
+}
+
+/*
+ * request handling
+ */
+
+static int
+cmd_check_init_native_matches(struct nfhp_rule *rule, int inc_modct,
+ int *inc_cthelp)
+{
+ u32 dimbitv = 0;
+ u8 frag_match = 0;
+ u16 proto_match = 0;
+ struct ipt_entry *e = NULL;
+ struct hipac_rule *r = &rule->r;
+ int i, ifindex, stat;
+ u8 dimid, inv, devbf;
+ u32 left, right;
+
+ devbf = (rule->indev[0] != '\0');
+ devbf |= (rule->outdev[0] != '\0') << 1;
+ for (i = 0; i < r->native_mct; i++) {
+ r->first_match[i].invert = !!r->first_match[i].invert;
+ dimid = r->first_match[i].dimid;
+ inv = r->first_match[i].invert;
+ left = r->first_match[i].left;
+ right = r->first_match[i].right;
+ if (i > 0 && dimid <= r->first_match[i - 1].dimid) {
+ return NFHE_SORT;
+ }
+ if (left > right || right > hipac_maxkey(dim2btype[dimid]) ||
+ (left == 0 && right == hipac_maxkey(dim2btype[dimid]))) {
+ return NFHE_MINT;
+ }
+ dimbitv |= 1 << dimid;
+ switch (dimid) {
+ case DIMID_INIFACE:
+ if (!(devbf & 1)) {
+ return NFHE_DEVA;
+ }
+ ifindex = nf_hipac_dev_get_vindex(rule->indev);
+ if (ifindex < 0) {
+ return ifindex;
+ }
+ r->first_match[i].left = ifindex;
+ r->first_match[i].right = ifindex;
+ if (e != NULL && inv) {
+ e->ip.invflags |= IPT_INV_VIA_IN;
+ }
+ devbf &= 0xfe;
+ r->origin &= NFHP_ORIGIN_INPUT |
+ NFHP_ORIGIN_FORWARD;
+ break;
+ case DIMID_OUTIFACE:
+ if (!(devbf & 2)) {
+ return NFHE_DEVA;
+ }
+ ifindex = nf_hipac_dev_get_vindex(rule->outdev);
+ if (ifindex < 0) {
+ return ifindex;
+ }
+ r->first_match[i].left = ifindex;
+ r->first_match[i].right = ifindex;
+ if (e != NULL && inv) {
+ e->ip.invflags |= IPT_INV_VIA_OUT;
+ }
+ devbf &= 0xfd;
+ r->origin &= NFHP_ORIGIN_OUTPUT |
+ NFHP_ORIGIN_FORWARD;
+ break;
+ case DIMID_PROTO:
+ if (!inv && left == right) {
+ proto_match = left;
+ }
+ if (e != NULL) {
+ e->ip.proto = r->first_match[i].left;
+ /* iptables does not support protocol
+ ranges; treating a range match as
+ inverted point match avoids illegal use
+ of iptables matches */
+ if (inv || left != right) {
+ e->ip.invflags |= IPT_INV_PROTO;
+ }
+ }
+ break;
+ case DIMID_FRAGMENT:
+ if (inv || (left != right && left == 0)) {
+ return NFHE_FRAG;
+ }
+ if (e != NULL) {
+ e->ip.flags = IPT_F_FRAG;
+ }
+ if (left > 0) {
+ r->first_match[i].left = 1;
+ r->first_match[i].right =
+ hipac_maxkey(dim2btype[dimid]);
+ } else {
+ frag_match = 1;
+ if (e != NULL) {
+ e->ip.invflags |= IPT_INV_FRAG;
+ }
+ }
+ break;
+ case DIMID_SYN:
+ if (inv || (left != right && left == 0)) {
+ return NFHE_SYN;
+ }
+ if (left > 0) {
+ r->first_match[i].left = 1;
+ r->first_match[i].right =
+ hipac_maxkey(dim2btype[dimid]);
+ }
+ break;
+ case DIMID_STATE:
+ if (left > NFHP_STATE_UNTRACKED) {
+ return NFHE_STATE;
+ }
+ if (inc_modct) {
+ stat = cthelp_use();
+ if (stat < 0) {
+ return stat;
+ }
+ (*inc_cthelp)++;
+ }
+ break;
+ }
+ }
+ if (devbf != 0) {
+ return NFHE_DEVB;
+ }
+
+ /* check inter-match dependencies */
+ if (dimbitv & (1 << DIMID_SYN)) {
+ if (proto_match != IPPROTO_TCP || !frag_match ||
+ dimbitv & (1 << DIMID_ICMP_TYPE)) {
+ return NFHE_TCP;
+ }
+ } else if (dimbitv & (1 << DIMID_DPORT) ||
+ dimbitv & (1 << DIMID_SPORT)) {
+ if ((proto_match != IPPROTO_UDP && proto_match != IPPROTO_TCP) || !frag_match ||
+ dimbitv & (1 << DIMID_ICMP_TYPE)) {
+ return NFHE_TCPUDP;
+ }
+ } else if (dimbitv & (1 << DIMID_ICMP_TYPE) &&
+ (proto_match != IPPROTO_ICMP || !frag_match)) {
+ return NFHE_ICMP;
+ }
+ return 0;
+}
+
+static inline u32
+origin_to_hookmask(u32 origin)
+{
+ return (origin & NFHP_ORIGIN_INPUT ? 1 << NF_INET_LOCAL_IN : 0) |
+ (origin & NFHP_ORIGIN_FORWARD ? 1 << NF_INET_FORWARD : 0) |
+ (origin & NFHP_ORIGIN_OUTPUT ? 1 << NF_INET_LOCAL_OUT : 0);
+}
+
+static void
+cmd_cleanup(struct hipac_rule *r, int inc_cthelp, int num_matches, int target)
+{
+ if (inc_cthelp) {
+ cthelp_unuse();
+ }
+}
+
+static int
+cmd_check_init(struct nfhp_cmd *cmd, int msg_len)
+{
+ int inc_cthelp = 0, num_matches = 0, target = 0 ;
+ int stat, inc_modct;
+ struct hipac_rule *r;
+ u32 c;
+
+ if (msg_len < sizeof(*cmd)) {
+ return NFHE_NOMSG;
+ }
+
+ /* basic checks */
+ c = cmd->cmd;
+ inc_modct = (c == CMD_APPEND || c == CMD_INSERT || c == CMD_REPLACE);
+ if (c < CMD_MIN || c > CMD_MAX) {
+ return NFHE_CMD;
+ }
+ cmd->chain.label[HIPAC_CHAIN_NAME_MAX_LEN - 1] = '\0';
+ cmd->chain.newlabel[HIPAC_CHAIN_NAME_MAX_LEN - 1] = '\0';
+ if (cmd->chain.label[0] == '\0' &&
+ !(c == CMD_FLUSH || c == CMD_DELETE_CHAIN || c == CMD_LIST)) {
+ return NFHE_LABEL;
+ }
+ if (c == CMD_RENAME_CHAIN && cmd->chain.newlabel[0] == '\0') {
+ return NFHE_NLABEL;
+ }
+ if (c == CMD_SET_POLICY && cmd->chain.policy != TARGET_ACCEPT &&
+ cmd->chain.policy != TARGET_DROP) {
+ return NFHE_POLICY;
+ }
+ if (!(c == CMD_APPEND || c == CMD_INSERT || c == CMD_DELETE_RULE ||
+ c == CMD_REPLACE)) {
+ /* we are finished since cmd->rule is irrelevant;
+ if c == CMD_DELETE_POS then cmd->rule.r.pos is verified
+ by hipac */
+ return 0;
+ }
+
+ /* rule checks */
+ r = &cmd->rule.r;
+ cmd->rule.indev[IFNAMSIZ - 1] = '\0';
+ cmd->rule.outdev[IFNAMSIZ - 1] = '\0';
+ r->origin = NFHP_ORIGIN_INPUT | NFHP_ORIGIN_FORWARD |
+ NFHP_ORIGIN_OUTPUT;
+ /* TEMPORARY FIX: TARGET_RETURN is not yet implemented */
+ if (r->action == TARGET_RETURN) {
+ return NFHE_IMPL;
+ }
+ if (!(r->action == TARGET_ACCEPT || r->action == TARGET_DROP ||
+ r->action == TARGET_NONE || r->action == TARGET_RETURN ||
+ r->action == TARGET_EXEC || r->action == TARGET_CHAIN)) {
+ return NFHE_ACTION;
+ }
+ if (r->native_mct > NUMBER_OF_DIM) {
+ return NFHE_NMCT;
+ }
+ r->size = sizeof(*r) + r->native_mct * sizeof(*r->first_match);
+ r->match_offset = r->target_offset = 0;
+
+ /* check the native matches */
+ stat = cmd_check_init_native_matches(&cmd->rule, inc_modct,
+ &inc_cthelp);
+ if (stat < 0) {
+ goto error;
+ }
+
+ /* rule _syntactically_ correct; it might still be invalid because
+ of a violation of the hipac semantics */
+ return 0;
+
+ error:
+ cmd_cleanup(r, inc_cthelp, num_matches, target);
+ return stat;
+}
+
+static int
+do_cmd(struct sk_buff *skb, int msg_len)
+{
+ struct nlmsghdr *nlh = (struct nlmsghdr *) skb->data;
+ struct nfhp_cmd *cmd = (struct nfhp_cmd *) NLMSG_DATA(nlh);
+ char *chain_label;
+ int stat;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+ struct netlink_dump_control control = {
+ .dump = nlhp_list,
+ .done = nlhp_done,
+ .module = THIS_MODULE,
+ };
+#endif
+
+ stat = cmd_check_init(cmd, msg_len);
+ if (stat < 0) {
+ nlhp_send_reply(skb, nlh, stat);
+ return 1;
+ }
+ if (cmd->chain.label[0] == '\0') {
+ chain_label = NULL;
+ } else {
+ chain_label = cmd->chain.label;
+ }
+
+ switch (cmd->cmd) {
+ case CMD_APPEND:
+ stat = hipac_append(chain_label, &cmd->rule.r);
+ if (stat != HE_OK) {
+ hipac_destroy_exec_nl(&cmd->rule.r);
+ }
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_INSERT:
+ stat = hipac_insert(chain_label, &cmd->rule.r);
+ if (stat != HE_OK) {
+ hipac_destroy_exec_nl(&cmd->rule.r);
+ }
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_DELETE_RULE:
+ stat = hipac_delete(chain_label, &cmd->rule.r);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_DELETE_POS:
+ stat = hipac_delete_pos(chain_label, cmd->rule.r.pos);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_REPLACE:
+ stat = hipac_replace(chain_label, &cmd->rule.r);
+ if (stat != HE_OK) {
+ hipac_destroy_exec_nl(&cmd->rule.r);
+ }
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_FLUSH:
+ stat = hipac_flush_chain(chain_label);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_NEW_CHAIN:
+ stat = hipac_new_chain(chain_label);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_DELETE_CHAIN:
+ stat = hipac_delete_chain(chain_label);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_RENAME_CHAIN:
+ stat = hipac_rename_chain(chain_label,
+ cmd->chain.newlabel);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_SET_POLICY:
+ stat = hipac_set_policy(chain_label, cmd->chain.policy);
+ nlhp_send_reply(skb, nlh, stat);
+ break;
+ case CMD_LIST:
+ {
+ if (linfo.inf != NULL) {
+ if (hipac_free_chain_infos(linfo.inf) != HE_OK) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: hipac_free_chain_info"
+ " failed\n", __FUNCTION__);
+ }
+ linfo.inf = NULL;
+ linfo.len = 0;
+ }
+ stat = hipac_get_chain_infos(chain_label, &linfo.inf,
+ &linfo.len);
+ if (stat < 0) {
+ linfo.inf = NULL;
+ linfo.len = 0;
+ nlhp_send_reply(skb, nlh, stat);
+ return 1;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ if ((stat = netlink_dump_start(nfhp_sock, skb, nlh, nlhp_list,
+ nlhp_done)) != 0) {
+#else
+ if ((stat = netlink_dump_start(nfhp_sock, skb, nlh, &control)) != 0) {
+
+#endif
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32)
+ if (stat != -EINTR)
+#endif
+ printk(KERN_ERR "netlink_dump_start failed\n");
+ return 1;
+ }
+ /* nlhp_done will release, or already has released, nlhp_lock so
+ don't release it again */
+ return 0;
+ }
+ default:
+ printk(KERN_ERR "invalid command type although "
+ "cmd_check_init reported a valid command\n");
+ nlhp_send_reply(skb, nlh, NFHE_NOMSG);
+ break;
+ }
+ return 1;
+}
+
+
+/*
+ * initialization, finalization
+ */
+
+static int
+__init init(void)
+{
+ struct sysinfo sys;
+ u64 total_mem;
+ int ret;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+ struct netlink_kernel_cfg cfg = {
+ .groups = 0,
+ .flags = 0,
+ .cb_mutex = NULL,
+ .input = nlhp_data_ready,
+ };
+#endif
+
+ si_meminfo(&sys);
+ total_mem = (u64) sys.totalram << PAGE_SHIFT;
+
+ /* initialize hipac layer */
+ if (hipac_init(dim2btype, extract,
+ sizeof(dim2btype) / sizeof(*dim2btype),
+ hipac_copy_constructor, hipac_destroy_exec,
+ hipac_match_exec, hipac_target_exec, hipac_eq_exec,
+ total_mem >> 1) != HE_OK) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "initialize hipac algorithm\n");
+ return -ENOMEM;
+ }
+ if (hipac_new("INPUT", "__/INPUT_INTERN\\__", TARGET_ACCEPT,
+ NFHP_ORIGIN_INPUT, &hipac_input) != HE_OK) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "create hipac data structure for input hook\n");
+ ret = -ENOMEM;
+ goto cleanup_hipac;
+ }
+ if (hipac_new("FORWARD", "__/FORWARD_INTERN\\__", TARGET_ACCEPT,
+ NFHP_ORIGIN_FORWARD, &hipac_forward) != HE_OK) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "create hipac data structure for forward hook\n");
+ ret = -ENOMEM;
+ goto cleanup_hipac;
+ }
+ if (hipac_new("OUTPUT", "__/OUTPUT_INTERN\\__", TARGET_ACCEPT,
+ NFHP_ORIGIN_OUTPUT, &hipac_output) != HE_OK) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "create hipac data structure for output hook\n");
+ ret = -ENOMEM;
+ goto cleanup_hipac;
+ }
+
+ /* register to netfilter */
+ if ((ret = nf_register_hook(&input_op)) < 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "register input hook\n");
+ goto cleanup_hipac;
+ }
+ if ((ret = nf_register_hook(&forward_op)) < 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "register forward hook\n");
+ goto cleanup_input;
+ }
+ if ((ret = nf_register_hook(&output_op)) < 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "register output hook\n");
+ goto cleanup_forward;
+ }
+
+ /* initialize interface manager */
+ if ((ret = nf_hipac_dev_init()) != 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "initialize device management\n");
+ goto cleanup_output;
+ }
+
+ /* initialize proc interface */
+ hpproc_init(total_mem);
+
+ /* enable netlink user communication */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ nfhp_sock = netlink_kernel_create(&init_net, NLHP_PROTO, 0, nlhp_data_ready, NULL, THIS_MODULE);
+#else
+ nfhp_sock = netlink_kernel_create(&init_net, NLHP_PROTO, &cfg);
+#endif
+ if (nfhp_sock == NULL) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "create kernel netlink socket\n");
+ ret = -ENOMEM;
+ goto cleanup_hpproc;
+ }
+
+ printk(KERN_INFO "nf_hipac: (C) 2002-2003 HIPAC core team "
+ "(Michael Bellion, Thomas Heinz)\n");
+ printk(KERN_INFO "nf_hipac: (C) 2004-2005 MARA Systems AB "
+ "(Michael Bellion)\n");
+ return 0;
+
+cleanup_hpproc:
+ hpproc_exit();
+ nf_hipac_dev_exit();
+cleanup_output:
+ nf_unregister_hook(&output_op);
+cleanup_forward:
+ nf_unregister_hook(&forward_op);
+cleanup_input:
+ nf_unregister_hook(&input_op);
+cleanup_hipac:
+ hipac_exit();
+ return ret;
+}
+
+static void
+__exit fini(void)
+{
+ /* wait for ongoing netlink or proc operations to finish */
+ down(&nlhp_lock);
+ if (nfhp_sock == NULL ||
+ nfhp_sock->sk_socket == NULL) {
+ /* this should never happen */
+ printk(KERN_ERR "nfhp_sock is broken\n");
+ } else {
+ sock_release(nfhp_sock->sk_socket);
+ }
+ if (linfo.inf != NULL &&
+ hipac_free_chain_infos(linfo.inf) != HE_OK) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: hipac_free_chain_info"
+ " failed\n", __FUNCTION__);
+ }
+ hpproc_exit();
+ nf_hipac_dev_exit();
+ nf_unregister_hook(&input_op);
+ nf_unregister_hook(&forward_op);
+ nf_unregister_hook(&output_op);
+ hipac_exit();
+ up(&nlhp_lock);
+}
+
+
+module_init(init);
+module_exit(fini);
+MODULE_AUTHOR("Michael Bellion and Thomas Heinz");
+MODULE_DESCRIPTION("NF-HIPAC - netfilter high performance "
+ "packet classification");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(nfhp_register_cthelp);
+EXPORT_SYMBOL(nfhp_unregister_cthelp);
+
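As nlhp_send_reply() above shows, every command (except the multipart CMD_LIST dump, which is streamed by nlhp_list() and walked with IPT_ALIGN as described in nfhp_com.h) is answered with a single NLMSG_ERROR-typed message whose payload is one bare int32_t status: HE_OK on success, a negative hipac/nfhipac error code otherwise, with values at or below NFHE_SYSOFF encoding a system errno via ERRNO_TO_NFHE()/NFHE_TO_ERRNO(). A user-space sketch of reading that status (my own illustration, not part of the patch) could look like this:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* 'fd' is assumed to be the NETLINK_NFHIPAC socket the command was just sent on */
static int32_t read_status(int fd)
{
        struct {
                struct nlmsghdr nlh;
                char data[240];
        } reply;
        ssize_t n = recv(fd, &reply, sizeof(reply), 0);

        if (n < (ssize_t) sizeof(struct nlmsghdr) ||
            !NLMSG_OK(&reply.nlh, (unsigned int) n) ||
            reply.nlh.nlmsg_type != NLMSG_ERROR) {
                fprintf(stderr, "unexpected reply\n");
                return INT32_MIN;  /* sentinel used only by this sketch */
        }
        /* unlike a standard netlink ACK, the payload here is a bare int32_t
           status exactly as nlhp_send_reply() wrote it, not a struct nlmsgerr */
        return *(int32_t *) NLMSG_DATA(&reply.nlh);
}
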
diff -urN nf-hipac/kernel/nfhp_mod.h nfhipac/kernel/nfhp_mod.h
--- nf-hipac/kernel/nfhp_mod.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_mod.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,47 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _NFHP_MOD_H
+#define _NFHP_MOD_H
+
+#include
+#include
+
+/* hipac data structures for INPUT, FORWARD and OUTPUT hook and the
+ corresponding netfilter hook ops */
+extern void *hipac_input;
+extern struct nf_hook_ops input_op;
+
+extern void *hipac_forward;
+extern struct nf_hook_ops forward_op;
+
+extern void *hipac_output;
+extern struct nf_hook_ops output_op;
+
+/* netlink mutex */
+extern struct semaphore nlhp_lock;
+
+int
+nfhp_register_cthelp(struct module *nfhp_cthelp_module);
+
+void
+nfhp_unregister_cthelp(struct module *nfhp_cthelp_module);
+
+#endif
diff -urN nf-hipac/kernel/nfhp_proc.c nfhipac/kernel/nfhp_proc.c
--- nf-hipac/kernel/nfhp_proc.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_proc.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,884 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include
+#include
+#include
+#include
+#include "nfhp_mod.h"
+#include "hipac.h"
+
+#define BT_I "rlp_input"
+#define BT_F "rlp_forward"
+#define BT_O "rlp_output"
+#define DT_I "dimtree_input"
+#define DT_F "dimtree_forward"
+#define DT_O "dimtree_output"
+#define HP_I "hipac_rules_input"
+#define HP_F "hipac_rules_forward"
+#define HP_O "hipac_rules_output"
+#define HP_C "hipac_chains"
+#define MEM "mem"
+#define INF "info"
+
+#define INF_M S_IRUGO | S_IWUSR
+#define OTH_M S_IRUSR
+
+struct proc_data
+{
+ char *text;
+ void *stat;
+ u32 len, valid_len;
+ rwlock_t lock;
+ void *hipac;
+ char *hipac_name;
+};
+
+struct nfhp_proc_entry
+{
+ const char *name;
+ struct proc_dir_entry *entry;
+ struct proc_dir_entry *parent;
+ mode_t mode;
+ read_proc_t *read_fn;
+ write_proc_t *write_fn;
+ void *hipac;
+ char *hipac_name;
+ u32 text_mem_required;
+ u32 stat_mem_required;
+};
+static struct proc_dir_entry *nfhipac_dir, *stat_dir;
+static const char proc_nfhipac_dir[] = "nf-hipac";
+static const char proc_stat_dir[] = "statistics";
+
+static write_proc_t info_write;
+static read_proc_t info_read;
+static read_proc_t mem_read;
+static read_proc_t rlp_read;
+static read_proc_t dimtree_read;
+static read_proc_t hipac_r_read;
+static read_proc_t hipac_c_read;
+
+/* the non constant members are initialized by init_nfhp_proc() */
+static struct nfhp_proc_entry nfhp_proc[] =
+{
+ { INF, NULL, NULL, INF_M, info_read, info_write, NULL, NULL,
+ 1000, sizeof(struct hipac_user_stat) },
+
+ { MEM, NULL, NULL, OTH_M, mem_read, NULL, NULL, NULL,
+ 2000, sizeof(struct hipac_mem_stat) },
+
+ { BT_I, NULL, NULL, OTH_M, rlp_read, NULL, NULL, "INPUT",
+ 25000, sizeof(struct hipac_rlp_stat) },
+
+ { BT_F, NULL, NULL, OTH_M, rlp_read, NULL, NULL, "FORWARD",
+ 25000, sizeof(struct hipac_rlp_stat) },
+
+ { BT_O, NULL, NULL, OTH_M, rlp_read, NULL, NULL, "OUTPUT",
+ 25000, sizeof(struct hipac_rlp_stat) },
+
+ { DT_I, NULL, NULL, OTH_M, dimtree_read, NULL, NULL, "INPUT",
+ 3000, sizeof(struct hipac_dimtree_stat) },
+
+ { DT_F, NULL, NULL, OTH_M, dimtree_read, NULL, NULL, "FORWARD",
+ 3000, sizeof(struct hipac_dimtree_stat) },
+
+ { DT_O, NULL, NULL, OTH_M, dimtree_read, NULL, NULL, "OUTPUT",
+ 3000, sizeof(struct hipac_dimtree_stat) },
+
+ { HP_I, NULL, NULL, OTH_M, hipac_r_read, NULL, NULL, "INPUT",
+ 3000, sizeof(struct hipac_rule_stat) },
+
+ { HP_F, NULL, NULL, OTH_M, hipac_r_read, NULL, NULL, "FORWARD",
+ 3000, sizeof(struct hipac_rule_stat) },
+
+ { HP_O, NULL, NULL, OTH_M, hipac_r_read, NULL, NULL, "OUTPUT",
+ 3000, sizeof(struct hipac_rule_stat) },
+
+ { HP_C, NULL, NULL, OTH_M, hipac_c_read, NULL, NULL, NULL,
+ 4000, sizeof(struct hipac_chain_stat) }
+};
+
+static const char indent_spc[] = " ";
+static u64 nfhp_total_mem = 0;
+
+
+
+/*
+ * helpers
+ */
+
+static inline void
+init_nfhp_proc(struct proc_dir_entry *nfhipac_dir,
+ struct proc_dir_entry *stat_dir)
+{
+ int i;
+
+ for (i = 0; i < sizeof(nfhp_proc) / sizeof(*nfhp_proc); i++) {
+ if (nfhp_proc[i].write_fn == info_write) {
+ nfhp_proc[i].parent = nfhipac_dir;
+ } else {
+ nfhp_proc[i].parent = stat_dir;
+ }
+ if (nfhp_proc[i].hipac_name == NULL) {
+ continue;
+ }
+ if (strcmp(nfhp_proc[i].hipac_name, "INPUT") == 0) {
+ nfhp_proc[i].hipac = hipac_input;
+ } else if (strcmp(nfhp_proc[i].hipac_name, "FORWARD") == 0) {
+ nfhp_proc[i].hipac = hipac_forward;
+ } else {
+ nfhp_proc[i].hipac = hipac_output;
+ }
+ }
+}
+
+static inline int
+init_data(struct proc_data *data, const struct nfhp_proc_entry *e)
+{
+ data->text = kmalloc(e->text_mem_required, GFP_KERNEL);
+ if (data->text == NULL) {
+ return -1;
+ }
+ data->stat = kmalloc(e->stat_mem_required, GFP_KERNEL);
+ if (data->stat == NULL) {
+ kfree(data->text);
+ return -1;
+ }
+ data->len = e->text_mem_required;
+ data->valid_len = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ data->lock = RW_LOCK_UNLOCKED;
+#else
+ rwlock_init(&data->lock);
+#endif
+ data->hipac = e->hipac;
+ data->hipac_name = e->hipac_name;
+ return 0;
+}
+
+static inline void
+free_data(struct proc_data *data)
+{
+ if (data == NULL) {
+ return;
+ }
+ if (data->text != NULL) {
+ kfree(data->text);
+ }
+ if (data->stat != NULL) {
+ kfree(data->stat);
+ }
+ kfree(data);
+}
+
+static inline void
+print_inline(struct proc_data *data, int indent)
+{
+ int i;
+
+ for (i = 0; i < indent; i++) {
+ data->valid_len += sprintf(data->text + data->valid_len,
+ indent_spc);
+ }
+}
+
+static int
+print_desc(struct proc_data *data, int indent, const char *desc)
+{
+ if (data->len < data->valid_len + indent * strlen(indent_spc) +
+ strlen(desc)) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: too little memory reserved\n",
+ __FUNCTION__);
+ return -1;
+ }
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text + data->valid_len, desc);
+ return 0;
+}
+
+static int
+print_scalar(struct proc_data *data, int indent, const char *desc, u64 val)
+{
+ if (data->len < data->valid_len + indent * strlen(indent_spc) +
+ strlen(desc) + 22) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: too little memory reserved\n",
+ __FUNCTION__);
+ return -1;
+ }
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text + data->valid_len, desc);
+ data->valid_len += sprintf(data->text + data->valid_len,
+ " %9llu\n", val);
+ return 0;
+}
+
+static int
+print_map(struct proc_data *data, int indent, const char *desc,
+ u32 map[], int len)
+{
+ int i, empty = 1;
+
+ if (data->len < data->valid_len + (1 + len) * indent *
+ strlen(indent_spc) + strlen(desc) + 1 + len * 25) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: too little memory reserved\n",
+ __FUNCTION__);
+ return -1;
+ }
+ for (i = 0; i < len; i++) {
+ if (map[i] == 0) {
+ continue;
+ }
+ if (empty) {
+ empty = 0;
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text +
+ data->valid_len, desc);
+ data->valid_len += sprintf(data->text +
+ data->valid_len, "\n");
+ }
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text + data->valid_len,
+ " %2u: %9u\n", i, map[i]);
+ }
+ return 0;
+}
+
+static int
+print_dist(struct proc_data *data, int indent, const char *desc,
+ u32 dist[], u32 len)
+{
+ int i, empty = 1;
+
+ if (data->len < data->valid_len + (1 + len) * indent *
+ strlen(indent_spc) + strlen(desc) + 1 + (len - 1) * 39 + 38) {
+ /* this should never happen */
+ printk(KERN_ERR "%s: too little memory reserved\n",
+ __FUNCTION__);
+ return -1;
+ }
+ if (len == 0) {
+ return 0;
+ }
+ for (i = 0; i < len - 1; i++) {
+ if (dist[i] == 0) {
+ continue;
+ }
+ if (empty) {
+ empty = 0;
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text +
+ data->valid_len, desc);
+ data->valid_len += sprintf(data->text +
+ data->valid_len, "\n");
+ }
+ print_inline(data, indent);
+ data->valid_len +=
+ sprintf(data->text + data->valid_len,
+ " [%9u, %9u]: %9u\n",
+ i == 0 ? 0 : 1 << (i - 1), (1 << i) - 1,
+ dist[i]);
+ }
+ if (dist[i] == 0) {
+ return 0;
+ }
+ if (empty) {
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text + data->valid_len, desc);
+ data->valid_len += sprintf(data->text + data->valid_len, "\n");
+ }
+ print_inline(data, indent);
+ data->valid_len += sprintf(data->text + data->valid_len,
+ " [%9u, infinity[: %9u\n", 1 << (i - 1),
+ dist[i]);
+ return 0;
+}
+
+static int
+write_stat(char *buf, char **start, off_t off, int count, int *eof,
+ struct proc_data *d)
+{
+ int len = d->valid_len - off;
+
+ if (len <= 0) {
+ *eof = 1;
+ return 0;
+ }
+ if (len <= count) {
+ *eof = 1;
+ } else {
+ len = count;
+ }
+ read_lock(&d->lock);
+ memcpy(buf, d->text + off, len);
+ read_unlock(&d->lock);
+ *start = buf;
+ return len;
+}
+
+
+
+/*
+ * i/o functions
+ */
+
+static int
+info_write(struct file *file, const char *buffer, unsigned long count,
+ void *data)
+{
+ static const char nfhp_first[] = "nf-hipac-first\n";
+ static const char ipt_first[] = "iptables-first\n";
+ static char buf[32] = {0};
+ int len = count > sizeof(buf) - 1 ? sizeof(buf) - 1 : count;
+ u64 new_max_mem;
+ int ret;
+
+ if (copy_from_user(buf, buffer, len)) {
+ return -EFAULT;
+ }
+
+ /* strings don't have to contain \n at the end */
+ if (!(count == sizeof(nfhp_first) - 1 ||
+ count == sizeof(ipt_first) - 1 ||
+ count == sizeof(nfhp_first) - 2 ||
+ count == sizeof(ipt_first) - 2)) {
+ if (count >= 9 && !(count == 10 && buf[9] != '\n')) {
+ /* input definitely too large */
+ return -EINVAL;
+ }
+
+ /* interpret as number */
+ new_max_mem = simple_strtoul(buf, NULL, 10) << 20;
+ if (new_max_mem > nfhp_total_mem) {
+ new_max_mem = nfhp_total_mem;
+ }
+ if (new_max_mem == hipac_get_maxmem()) {
+ return len;
+ }
+ down(&nlhp_lock);
+ switch (hipac_set_maxmem(new_max_mem)) {
+ case HE_LOW_MEMORY:
+ up(&nlhp_lock);
+ printk(KERN_NOTICE "nf_hipac: actual memory "
+ "consumption larger than memory bound "
+ "written to " INF "\n");
+ return -EINVAL;
+ case HE_OK:
+ up(&nlhp_lock);
+ return len;
+ default:
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: unexpected return value\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* change order */
+ if (strncmp(buf, nfhp_first, len) == 0) {
+ if (input_op.priority >= NF_IP_PRI_FILTER) {
+ nf_unregister_hook(&input_op);
+ nf_unregister_hook(&forward_op);
+ nf_unregister_hook(&output_op);
+ input_op.priority = forward_op.priority =
+ output_op.priority = NF_IP_PRI_FILTER - 1;
+ goto hook_register;
+ }
+ } else if (strncmp(buf, ipt_first, len) == 0) {
+ if (input_op.priority <= NF_IP_PRI_FILTER) {
+ nf_unregister_hook(&input_op);
+ nf_unregister_hook(&forward_op);
+ nf_unregister_hook(&output_op);
+ input_op.priority = forward_op.priority =
+ output_op.priority = NF_IP_PRI_FILTER + 1;
+ goto hook_register;
+ }
+ }
+ return len;
+
+hook_register:
+ if ((ret = nf_register_hook(&input_op)) < 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "register input hook\n");
+ goto cleanup;
+ }
+ if ((ret = nf_register_hook(&forward_op)) < 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "register forward hook\n");
+ goto cleanup_input;
+ }
+ if ((ret = nf_register_hook(&output_op)) < 0) {
+ printk(KERN_ERR "nf_hipac: initialization failed: unable to "
+ "register output hook\n");
+ goto cleanup_forward;
+ }
+ return len;
+cleanup_forward:
+ nf_unregister_hook(&forward_op);
+cleanup_input:
+ nf_unregister_hook(&input_op);
+cleanup:
+ return ret;
+}
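
As the parsing above shows, the info entry accepts two kinds of input: the literal strings "nf-hipac-first" / "iptables-first", which re-register the three hooks just before or just after iptables' filter table, or a plain decimal number that is taken as the new memory bound in megabytes (shifted left by 20 and clamped to nfhp_total_mem before hipac_set_maxmem() is called). A hedged userspace sketch of both kinds of write follows; the proc path is an assumption of mine, since the directory and entry names are defined outside this excerpt.

/* Sketch of driving the write handler above from userspace.
 * INFO_PATH is an assumed location; check proc_nfhipac_dir in the patch. */
#include <stdio.h>

#define INFO_PATH "/proc/net/nf-hipac/info"

static int
write_info(const char *msg)
{
        FILE *f = fopen(INFO_PATH, "w");

        if (f == NULL)
                return -1;
        fputs(msg, f);                  /* e.g. "nf-hipac-first\n" or "64\n" */
        return fclose(f);
}

int main(void)
{
        write_info("nf-hipac-first\n"); /* classify before the iptables filter table */
        write_info("64\n");             /* cap hipac data structures at 64 MB */
        return 0;
}
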
+
+/*
+ the statistics are being rebuilt if the proc entry is read from its
+ beginning; if you modify the ruleset while at the same time reading
+ a proc file with a pager strange things might happen to your pager
+ output ;-)
+ nonetheless this is the best we can do ... at least I think so :-)
+*/
+
+#define NEED_REBUILD (off == 0 || d->valid_len == 0)
+#define LEN(x) (sizeof(x) / sizeof(*(x)))
+#define EXEC(fn) \
+do { \
+ if (fn < 0) { \
+ goto error; \
+ } \
+} while (0)
+
+static int
+info_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ struct proc_data *d = data;
+ struct hipac_user_stat *stat = d->stat;
+
+ if (!NEED_REBUILD) {
+ return write_stat(page, start, off, count, eof, d);
+ }
+
+ /* (re)compute statistics */
+ down(&nlhp_lock);
+ if (hipac_get_user_stat(stat) != HE_OK) {
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: hipac_get_user_stat failed\n",
+ __FUNCTION__);
+ *eof = 1;
+ return 0;
+ }
+ up(&nlhp_lock);
+
+ /* (re)build text */
+ write_lock(&d->lock);
+ d->valid_len = 0;
+ EXEC(print_scalar(d, 0, "maximum memory bound: ",
+ hipac_get_maxmem()));
+ EXEC(print_scalar(d, 0, "total memory (used): ",
+ stat->total_mem_tight));
+ EXEC(print_scalar(d, 0, "total memory (allocated):",
+ stat->total_mem_real));
+ EXEC(print_scalar(d, 0, "total number of chains: ",
+ stat->chain_num));
+ EXEC(print_scalar(d, 0, "total number of rules: ",
+ stat->rule_num));
+ if (input_op.priority < NF_IP_PRI_FILTER) {
+ EXEC(print_desc(d, 0, "nf-hipac is invoked before "
+ "iptables\n"));
+ } else {
+ EXEC(print_desc(d, 0, "iptables is invoked before "
+ "nf-hipac\n"));
+ }
+#ifdef SINGLE_PATH
+ EXEC(print_desc(d, 0, "compiled with SINGLE_PATH optimization\n"));
+#else
+ EXEC(print_desc(d, 0, "compiled without SINGLE_PATH optimization\n"));
+#endif
+ write_unlock(&d->lock);
+ return write_stat(page, start, off, count, eof, d);
+
+ error:
+ write_unlock(&d->lock);
+ *eof = 1;
+ return 0;
+}
+
+static int
+mem_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ struct proc_data *d = data;
+ struct hipac_mem_stat *stat = d->stat;
+
+ if (!NEED_REBUILD) {
+ return write_stat(page, start, off, count, eof, d);
+ }
+
+ /* (re)compute statistics */
+ down(&nlhp_lock);
+ if (hipac_get_mem_stat(stat) != HE_OK) {
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: hipac_get_mem_stat failed\n",
+ __FUNCTION__);
+ *eof = 1;
+ return 0;
+ }
+ up(&nlhp_lock);
+
+ /* (re)build text */
+ write_lock(&d->lock);
+ d->valid_len = 0;
+ EXEC(print_scalar(d, 0, "total memory (used): ",
+ stat->total_mem_tight));
+ EXEC(print_scalar(d, 0, "total memory (allocated):",
+ stat->total_mem_real));
+ EXEC(print_desc(d, 0, "memhash:\n"));
+ EXEC(print_scalar(d, 1, "number of entries: ",
+ stat->memhash_elem_num));
+ EXEC(print_scalar(d, 1, "number of buckets: ",
+ stat->memhash_len));
+ EXEC(print_scalar(d, 1, "number of entries in smallest bucket: ",
+ stat->memhash_smallest_bucket_len));
+ EXEC(print_scalar(d, 1, "number of entries in largest bucket: ",
+ stat->memhash_biggest_bucket_len));
+ EXEC(print_dist(d, 1, "number of buckets with [x, y] entries:",
+ stat->memhash_bucket_stat,
+ LEN(stat->memhash_bucket_stat)));
+ write_unlock(&d->lock);
+ return write_stat(page, start, off, count, eof, d);
+
+ error:
+ write_unlock(&d->lock);
+ *eof = 1;
+ return 0;
+}
+
+static int
+rlp_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ static char buf[100] = {0};
+ struct proc_data *d = data;
+ struct hipac_rlp_stat *stat = d->stat;
+ int i;
+
+ if (!NEED_REBUILD) {
+ return write_stat(page, start, off, count, eof, d);
+ }
+
+ /* (re)compute statistics */
+ down(&nlhp_lock);
+ if (hipac_get_rlp_stat(d->hipac, stat) != HE_OK) {
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: hipac_get_rlp_stat failed\n",
+ __FUNCTION__);
+ *eof = 1;
+ return 0;
+ }
+ up(&nlhp_lock);
+
+ /* (re)build text */
+ write_lock(&d->lock);
+ d->valid_len = 0;
+ EXEC(print_desc(d, 0, "root chain: "));
+ EXEC(print_desc(d, 0, d->hipac_name));
+ EXEC(print_desc(d, 0, "\n"));
+ EXEC(print_scalar(d, 0, "total memory (used): ",
+ stat->total_mem_tight));
+ EXEC(print_scalar(d, 0, "total memory (allocated): ",
+ stat->total_mem_real));
+ EXEC(print_scalar(d, 1, "rlp memory (used): ",
+ stat->rlp_mem_tight));
+ EXEC(print_scalar(d, 1, "rlp memory (allocated): ",
+ stat->rlp_mem_real));
+ EXEC(print_scalar(d, 1, "termrule memory (used): ",
+ stat->termrule_mem_tight));
+ EXEC(print_scalar(d, 1, "termrule memory (allocated):",
+ stat->termrule_mem_real));
+ EXEC(print_scalar(d, 0, "number of rlps: ",
+ stat->rlp_num));
+ EXEC(print_map(d, 1, "number of rlps in dimid x:",
+ stat->rlp_dimid_num, LEN(stat->rlp_dimid_num)));
+ EXEC(print_map(d, 1, "number of rlps in depth x:",
+ stat->rlp_depth_num, LEN(stat->rlp_depth_num)));
+ EXEC(print_scalar(d, 0, "number of termrule blocks: ",
+ stat->termrule_num));
+ EXEC(print_scalar(d, 0, "total number of termrule entries:",
+ stat->termrule_ptr_num));
+ EXEC(print_scalar(d, 0, "number of keys: ",
+ stat->keys_num));
+ for (i = 0; i < LEN(stat->rlp_dimid_keys_stat); i++) {
+ if (snprintf(buf, sizeof(buf) - 1, "number of rlps in dimid"
+ " %d with [x, y] keys:", i) < 0) {
+ printk(KERN_ERR "%s: static buffer too small\n",
+ __FUNCTION__);
+ break;
+ }
+ EXEC(print_dist(d, 1, buf, stat->rlp_dimid_keys_stat[i],
+ LEN(*stat->rlp_dimid_keys_stat)));
+ }
+ EXEC(print_scalar(d, 0, "number of terminal pointers: ",
+ stat->termptr_num));
+ EXEC(print_map(d, 1, "number of terminal pointers in dimid x:",
+ stat->termptr_dimid_num,
+ LEN(stat->termptr_dimid_num)));
+ EXEC(print_map(d, 1, "number of terminal pointers in depth x:",
+ stat->termptr_depth_num,
+ LEN(stat->termptr_depth_num)));
+ EXEC(print_scalar(d, 0, "number of non-terminal pointers: ",
+ stat->nontermptr_num));
+ EXEC(print_map(d, 1, "number of non-terminal pointers in dimid x:",
+ stat->nontermptr_dimid_num,
+ LEN(stat->nontermptr_dimid_num)));
+ EXEC(print_map(d, 1, "number of non-terminal pointers in depth x:",
+ stat->nontermptr_depth_num,
+ LEN(stat->nontermptr_depth_num)));
+ EXEC(print_scalar(d, 0, "number of dt_elem structs: ",
+ stat->dt_elem_num));
+ EXEC(print_scalar(d, 1, "total number of dt_elem entries:"
+ " ", stat->dt_elem_ptr_num));
+ EXEC(print_dist(d, 1, "number of dt_elem structs with [x, y] entries:",
+ stat->dt_elem_stat, LEN(stat->dt_elem_stat)));
+ write_unlock(&d->lock);
+ return write_stat(page, start, off, count, eof, d);
+
+ error:
+ write_unlock(&d->lock);
+ *eof = 1;
+ return 0;
+}
+
+static int
+dimtree_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ struct proc_data *d = data;
+ struct hipac_dimtree_stat *stat = d->stat;
+
+ if (!NEED_REBUILD) {
+ return write_stat(page, start, off, count, eof, d);
+ }
+
+ /* (re)compute statistics */
+ down(&nlhp_lock);
+ if (hipac_get_dimtree_stat(d->hipac, stat) != HE_OK) {
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: hipac_get_dimtree_stat failed\n",
+ __FUNCTION__);
+ *eof = 1;
+ return 0;
+ }
+ up(&nlhp_lock);
+
+ /* (re)build text */
+ write_lock(&d->lock);
+ d->valid_len = 0;
+ EXEC(print_desc(d, 0, "root chain: "));
+ EXEC(print_desc(d, 0, d->hipac_name));
+ EXEC(print_desc(d, 0, "\n"));
+ EXEC(print_scalar(d, 0, "chain memory (used): ",
+ stat->chain_mem_tight));
+ EXEC(print_scalar(d, 0, "chain memory (allocated): ",
+ stat->chain_mem_real));
+ EXEC(print_scalar(d, 0, "number of rules: ",
+ stat->rule_num));
+ EXEC(print_scalar(d, 1, "number of rules with ipt matches: "
+ " ", stat->rules_with_exec_matches));
+ EXEC(print_scalar(d, 1, "number of rules with ipt target: "
+ " ", stat->rules_with_exec_target));
+ EXEC(print_dist(d, 1, "number of \"same pos rules\" series of "
+ "length [x, y]:", stat->rules_same_pos_stat,
+ LEN(stat->rules_same_pos_stat)));
+ EXEC(print_map(d, 0, "number of rules with x dt_matches:",
+ stat->dt_match_stat, LEN(stat->dt_match_stat)));
+ write_unlock(&d->lock);
+ return write_stat(page, start, off, count, eof, d);
+
+ error:
+ write_unlock(&d->lock);
+ *eof = 1;
+ return 0;
+}
+
+static int
+hipac_r_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ struct proc_data *d = data;
+ struct hipac_rule_stat *stat = d->stat;
+
+ if (!NEED_REBUILD) {
+ return write_stat(page, start, off, count, eof, d);
+ }
+
+ /* (re)compute statistics */
+ down(&nlhp_lock);
+ if (hipac_get_rule_stat(d->hipac, stat) != HE_OK) {
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: hipac_get_rule_stat failed\n",
+ __FUNCTION__);
+ *eof = 1;
+ return 0;
+ }
+ up(&nlhp_lock);
+
+ /* (re)build text */
+ write_lock(&d->lock);
+ d->valid_len = 0;
+ EXEC(print_desc(d, 0, "root chain: "));
+ EXEC(print_desc(d, 0, d->hipac_name));
+ EXEC(print_desc(d, 0, "\n"));
+ EXEC(print_scalar(d, 0, "number of rules: ",
+ stat->rule_num));
+ EXEC(print_scalar(d, 1, "number of rules with ipt matches: ",
+ stat->exec_match_num));
+ EXEC(print_scalar(d, 1, "number of rules with ipt target: ",
+ stat->exec_target_num));
+ EXEC(print_scalar(d, 1, "number of rules with jump target: ",
+ stat->jump_target_num));
+ EXEC(print_scalar(d, 1, "number of rules with return target:",
+ stat->return_target_num));
+ EXEC(print_map(d, 0, "number of rules with x hipac_matches: ",
+ stat->hipac_match_stat, LEN(stat->hipac_match_stat)));
+ EXEC(print_map(d, 0, "number of rules with x inverted matches:",
+ stat->inv_rules_stat, LEN(stat->inv_rules_stat)));
+ write_unlock(&d->lock);
+ return write_stat(page, start, off, count, eof, d);
+
+ error:
+ write_unlock(&d->lock);
+ *eof = 1;
+ return 0;
+}
+
+static int
+hipac_c_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ struct proc_data *d = data;
+ struct hipac_chain_stat *stat = d->stat;
+
+ if (!NEED_REBUILD) {
+ return write_stat(page, start, off, count, eof, d);
+ }
+
+ /* (re)compute statistics */
+ down(&nlhp_lock);
+ if (hipac_get_chain_stat(stat) != HE_OK) {
+ /* this should never happen */
+ up(&nlhp_lock);
+ printk(KERN_ERR "%s: hipac_get_chain_stat failed\n",
+ __FUNCTION__);
+ *eof = 1;
+ return 0;
+ }
+ up(&nlhp_lock);
+
+ /* (re)build text */
+ write_lock(&d->lock);
+ d->valid_len = 0;
+ EXEC(print_scalar(d, 0, "chain memory (used): ", stat->mem_tight));
+ EXEC(print_scalar(d, 0, "chain memory (allocated):", stat->mem_real));
+ EXEC(print_scalar(d, 0, "number of chains: ", stat->chain_num));
+ EXEC(print_scalar(d, 0, "number of rules: ", stat->rule_num));
+ EXEC(print_dist(d, 1, "number of chains with [x, y] prefixes: ",
+ stat->prefix_stat, LEN(stat->prefix_stat)));
+ EXEC(print_dist(d, 1, "number of chains with [x, y] incoming arcs:",
+ stat->incoming_stat, LEN(stat->incoming_stat)));
+ EXEC(print_dist(d, 1, "number of chains with [x, y] outgoing arcs:",
+ stat->outgoing_stat, LEN(stat->outgoing_stat)));
+ write_unlock(&d->lock);
+ return write_stat(page, start, off, count, eof, d);
+
+ error:
+ write_unlock(&d->lock);
+ *eof = 1;
+ return 0;
+}
+
+void
+hpproc_init(u64 total_mem)
+{
+ struct proc_data *data;
+ int i, j;
+
+ nfhp_total_mem = total_mem;
+
+ /* create proc directories */
+ nfhipac_dir = proc_mkdir(proc_nfhipac_dir, init_net.proc_net);
+ if (nfhipac_dir == NULL) {
+ printk(KERN_NOTICE "nf_hipac: unable to create proc "
+ "directory\n");
+ return;
+ }
+ stat_dir = proc_mkdir(proc_stat_dir, nfhipac_dir);
+ if (stat_dir == NULL) {
+ printk(KERN_NOTICE "nf_hipac: unable to create proc "
+ "directory\n");
+ goto cleanup_nfhipac_dir;
+ }
+
+ /* create statistics entries */
+ init_nfhp_proc(nfhipac_dir, stat_dir);
+ for (i = 0; i < sizeof(nfhp_proc) / sizeof(*nfhp_proc); i++) {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL) {
+ printk(KERN_NOTICE "nf_hipac: unable to create "
+ "proc infrastructure because of low memory\n");
+ goto cleanup;
+ }
+ if (init_data(data, &nfhp_proc[i]) < 0) {
+ printk(KERN_NOTICE "nf_hipac: unable to create "
+ "proc infrastructure because of low memory\n");
+ goto cleanup;
+ }
+ nfhp_proc[i].entry = create_proc_entry(nfhp_proc[i].name,
+ nfhp_proc[i].mode,
+ nfhp_proc[i].parent);
+ if (nfhp_proc[i].entry == NULL) {
+ printk(KERN_NOTICE "nf_hipac: unable to create proc "
+ "entry\n");
+ goto cleanup;
+ }
+ nfhp_proc[i].entry->data = data;
+ nfhp_proc[i].entry->read_proc = nfhp_proc[i].read_fn;
+ nfhp_proc[i].entry->write_proc = nfhp_proc[i].write_fn;
+ }
+ return;
+
+ cleanup:
+ for (j = 0; j < i; j++)
+ remove_proc_entry(nfhp_proc[j].name, nfhp_proc[j].parent);
+ remove_proc_entry(proc_stat_dir, nfhipac_dir);
+ cleanup_nfhipac_dir:
+ remove_proc_entry(proc_nfhipac_dir, init_net.proc_net);
+ return;
+}
+
+void
+hpproc_exit(void)
+{
+ int i;
+
+ for (i = 0; i < sizeof(nfhp_proc) / sizeof(*nfhp_proc); i++)
+ remove_proc_entry(nfhp_proc[i].name, nfhp_proc[i].parent);
+ remove_proc_entry(proc_stat_dir, nfhipac_dir);
+ remove_proc_entry(proc_nfhipac_dir, init_net.proc_net);
+}
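
hpproc_init() above uses the pre-3.10 procfs interface, in which create_proc_entry() returns a struct proc_dir_entry whose data, read_proc and write_proc fields are filled in by hand; this matches the README note further down that the port was tested on 2.6.32 and 3.9.6 (the interface was removed in 3.10). Stripped to its essentials, the registration pattern looks like the sketch below; the directory name, entry name and my_* identifiers are illustrative only.

/* Minimal sketch of the pre-3.10 procfs registration used by hpproc_init().
 * All names are illustrative and not part of the patch. */
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <net/net_namespace.h>

static struct proc_dir_entry *my_dir;

static int
my_read(char *page, char **start, off_t off, int count, int *eof, void *data)
{
        int len = sprintf(page, "hello from the example entry\n");

        *eof = 1;
        return off ? 0 : len;   /* the whole text fits into a single page */
}

static void
my_proc_init(void)
{
        struct proc_dir_entry *entry;

        my_dir = proc_mkdir("nf-hipac-example", init_net.proc_net);
        if (my_dir == NULL)
                return;
        entry = create_proc_entry("info", S_IFREG | S_IRUGO, my_dir);
        if (entry != NULL) {
                entry->data = NULL;         /* handed back as 'data' above */
                entry->read_proc = my_read;
        }
}
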
diff -urN nf-hipac/kernel/nfhp_proc.h nfhipac/kernel/nfhp_proc.h
--- nf-hipac/kernel/nfhp_proc.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/nfhp_proc.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,24 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _NFHP_PROC_H
+#define _NFHP_PROC_H
+
+void
+hpproc_init(u64 total_mem);
+
+void
+hpproc_exit(void);
+
+#endif
diff -urN nf-hipac/kernel/rlp.c nfhipac/kernel/rlp.c
--- nf-hipac/kernel/rlp.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/rlp.c 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,537 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include "global.h"
+#include "rlp.h"
+
+#define KEYSIZE(bittype) (1 << ((bittype) + 1))
+#define PTR_ALIGN_(v) (((v) + (__alignof__(void *) - 1)) & \
+ (~(__alignof__(void *) - 1)))
+#define FIRST_KEY(spec) ((void *) (spec) + sizeof(*(spec)) + \
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *))
+#define FIRST_NEXTSPEC(spec) (FIRST_KEY(spec) + \
+ PTR_ALIGN_((spec)->num * KEYSIZE((spec)->bittype)))
+
+
+/*
+ * optimized locate functions
+ */
+
+static struct gen_spec *
+#ifdef SINGLE_PATH
+u16_locate(const struct rlp_spec *spec, const void *packet, int *hotdrop)
+#else
+u16_locate(const struct rlp_spec *spec, const void *packet, int *hotdrop,
+ struct gen_spec **nodes, __u8 *nodes_len)
+#endif
+{
+ const __u16 key = extract_fn[spec->dimid](packet, hotdrop);
+ const __u16 *part = (void *) spec + sizeof(*spec);
+ __u16 left = 0;
+ __u16 right = spec->num - 1;
+ __u16 pos;
+
+ while (left <= right) {
+ pos = (left + right) >> 1;
+ if (part[pos] < key) {
+ left = pos + 1;
+ } else if (pos && part[pos - 1] >= key) {
+ right = pos - 1;
+ } else {
+ return *(struct gen_spec **)
+ ((void *) part + PTR_ALIGN_(spec->num << 1) +
+ pos * sizeof(void *));
+ }
+ }
+
+ /* should never be reached */
+ assert(1 == 0);
+ return NULL;
+}
+
+#ifdef SINGLE_PATH
+static struct gen_spec *
+u32_locate(const struct rlp_spec *spec, const void *packet, int *hotdrop)
+{
+ const __u32 key = extract_fn[spec->dimid](packet, hotdrop);
+ const __u32 *part = (void *) spec + sizeof(*spec);
+ __u32 left = 0;
+ __u32 right = spec->num - 1;
+ __u32 pos;
+
+ while (left <= right) {
+ pos = (left + right) >> 1;
+ if (part[pos] < key) {
+ left = pos + 1;
+ } else if (pos && part[pos - 1] >= key) {
+ right = pos - 1;
+ } else {
+ return *(struct gen_spec **)
+ ((void *) part + PTR_ALIGN_(spec->num << 2) +
+ pos * sizeof(void *));
+ }
+ }
+
+ /* should never be reached */
+ assert(1 == 0);
+ return NULL;
+}
+#else
+static struct gen_spec *
+u32_locate(const struct rlp_spec *spec, const void *packet, int *hotdrop,
+ struct gen_spec **nodes, __u8 *nodes_len)
+{
+ const __u32 key = extract_fn[spec->dimid](packet, hotdrop);
+ const __u32 *part = (void *) spec + sizeof(*spec) +
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *);
+ __u32 left = 0;
+ __u32 right = spec->num - 1;
+ __u32 pos;
+
+ while (left <= right) {
+ pos = (left + right) >> 1;
+ if (part[pos] < key) {
+ left = pos + 1;
+ } else if (pos && part[pos - 1] >= key) {
+ right = pos - 1;
+ } else {
+ if (HAS_WILDCARD_SPEC(spec) && *((void **) part - 1)
+ && !(*hotdrop)) {
+ nodes[(*nodes_len)++] = *((void **) part - 1);
+ }
+ return *(struct gen_spec **)
+ ((void *) part + PTR_ALIGN_(spec->num << 2) +
+ pos * sizeof(void *));
+ }
+ }
+
+ /* should never be reached */
+ assert(1 == 0);
+ return NULL;
+}
+#endif // SINGLE_PATH
+
+
+
+/*
+ * lookup helper
+ */
+
+static inline int
+u16_key_exists(const struct rlp_spec *spec, __u32 key, struct locate_inf *inf,
+ __u32 *position)
+{
+ const __u16 *part = FIRST_KEY(spec);
+ __u16 left = 0;
+ __u16 right = spec->num - 1;
+ __u16 pos;
+
+ while (left <= right) {
+ pos = (left + right) >> 1;
+ if (part[pos] < key) {
+ left = pos + 1;
+ } else if (pos && part[pos - 1] >= key) {
+ right = pos - 1;
+ } else {
+ if (inf != NULL) {
+ inf->key = part[pos];
+ inf->nextspec = FIRST_NEXTSPEC(spec) +
+ pos * sizeof(void *);
+ }
+ if (position != NULL) {
+ *position = pos;
+ }
+ return part[pos] == key;
+ }
+ }
+
+ /* should never be reached */
+ assert(1 == 0);
+ return 0;
+}
+
+static inline int
+u32_key_exists(const struct rlp_spec *spec, __u32 key, struct locate_inf *inf,
+ __u32 *position)
+{
+ const __u32 *part = FIRST_KEY(spec);
+ __u32 left = 0;
+ __u32 right = spec->num - 1;
+ __u32 pos;
+
+ while (left <= right) {
+ pos = (left + right) >> 1;
+ if (part[pos] < key) {
+ left = pos + 1;
+ } else if (pos && part[pos - 1] >= key) {
+ right = pos - 1;
+ } else {
+ if (inf != NULL) {
+ inf->key = part[pos];
+ inf->nextspec = FIRST_NEXTSPEC(spec) +
+ pos * sizeof(void *);
+ }
+ if (position != NULL) {
+ *position = pos;
+ }
+ return part[pos] == key;
+ }
+ }
+
+ /* should never be reached */
+ assert(1 == 0);
+ return 0;
+}
+
+
+
+/*
+ * interface functions
+ */
+
+struct ptrblock **
+termrule(const struct rlp_spec *spec)
+{
+ if (unlikely(spec == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+
+ return (struct ptrblock **)
+ (FIRST_NEXTSPEC(spec) + spec->num * sizeof(void *));
+}
+
+struct rlp_spec *
+rlp_new(__u8 bittype, __u8 dimid, __u8 ins_num, const __u32 key[],
+ struct gen_spec *nextspec[])
+{
+ struct rlp_spec *new_rlp;
+
+ if (unlikely(bittype > BIT_U32 || key == NULL || nextspec == NULL ||
+ !(ins_num == 1 || ins_num == 2) ||
+ (ins_num == 1 && key[0] != hipac_maxkey(bittype)) ||
+ (ins_num == 2 && (key[0] >= key[1] ||
+ key[1] != hipac_maxkey(bittype))))) {
+ ARG_MSG;
+ return NULL;
+ }
+
+ new_rlp = hp_alloc(sizeof(*new_rlp) +
+ HAS_WILDCARD_DIM(dimid) * sizeof(void *) +
+ PTR_ALIGN_(ins_num * KEYSIZE(bittype)) +
+ (ins_num + 1) * sizeof(void *), 1);
+ if (new_rlp == NULL) {
+ return NULL;
+ }
+ new_rlp->rlp = 1;
+ new_rlp->bittype = bittype;
+ new_rlp->dimid = dimid;
+ new_rlp->newspec = 0;
+ new_rlp->num = ins_num;
+ *termrule(new_rlp) = NULL;
+ if (HAS_WILDCARD_DIM(dimid)) {
+ *WILDCARD(new_rlp) = NULL;
+ }
+
+ switch (bittype) {
+ case BIT_U16: {
+ __u16 *k = FIRST_KEY(new_rlp);
+ struct gen_spec **s = FIRST_NEXTSPEC(new_rlp);
+ new_rlp->locate = u16_locate;
+ k[0] = key[0];
+ s[0] = nextspec[0];
+ if (ins_num == 2) {
+ k[1] = key[1];
+ s[1] = nextspec[1];
+ }
+ break;
+ }
+ case BIT_U32: {
+ __u32 *k = FIRST_KEY(new_rlp);
+ struct gen_spec **s = FIRST_NEXTSPEC(new_rlp);
+ new_rlp->locate = u32_locate;
+ k[0] = key[0];
+ s[0] = nextspec[0];
+ if (ins_num == 2) {
+ k[1] = key[1];
+ s[1] = nextspec[1];
+ }
+ break;
+ }
+ }
+ return new_rlp;
+}
+
+__u32
+rlp_size(const struct rlp_spec *spec)
+{
+ if (unlikely(spec == NULL)) {
+ ARG_MSG;
+ return 0;
+ }
+
+ return sizeof(*spec) +
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *) +
+ PTR_ALIGN_(spec->num * KEYSIZE(spec->bittype)) +
+ (spec->num + 1) * sizeof(void *);
+}
+
+struct gen_spec **
+rlp_nextspec(const struct rlp_spec *spec)
+{
+ if (unlikely(spec == NULL)) {
+ ARG_MSG;
+ return NULL;
+ }
+
+ return FIRST_NEXTSPEC(spec);
+}
+
+hipac_error
+rlp_clone(const struct rlp_spec *spec, struct rlp_spec **clone)
+{
+ hipac_error stat;
+ __u32 size;
+
+ if (unlikely(spec == NULL || clone == NULL)) {
+ ARG_ERR;
+ }
+
+ size = rlp_size(spec);
+ *clone = hp_alloc(size, 1);
+ if (*clone == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(*clone, spec, size);
+ (*clone)->newspec = 0;
+ stat = ptrblock_clone(*termrule(spec), termrule(*clone));
+ if (stat < 0) {
+ hp_free(*clone);
+ *clone = NULL;
+ return stat;
+ }
+ return HE_OK;
+}
+
+hipac_error
+rlp_insert(const struct rlp_spec *spec, __u8 ins_num, const __u32 key[],
+ struct gen_spec *nextspec[], struct rlp_spec **result)
+{
+ void *first_ksrc, *ksrc, *kdst, *nsrc, *ndst;
+ struct gen_spec *lnspec[2];
+ __u32 pos[2], lkey[2];
+ __u32 i, ksize, nsize;
+ hipac_error stat;
+
+ if (unlikely(spec == NULL || key == NULL || nextspec == NULL ||
+ result == NULL || !(ins_num == 1 || ins_num == 2) ||
+ (ins_num == 1 &&
+ key[0] >= hipac_maxkey(spec->bittype)) ||
+ (ins_num == 2 &&
+ (key[0] >= key[1] ||
+ key[1] >= hipac_maxkey(spec->bittype))))) {
+ ARG_ERR;
+ }
+
+ switch (spec->bittype) {
+ case BIT_U16: {
+ __u8 ct = 0;
+ if (!u16_key_exists(spec, key[0], NULL, &pos[0])) {
+ lkey[ct] = key[0];
+ lnspec[ct++] = nextspec[0];
+ }
+ if (ins_num == 2 &&
+ !u16_key_exists(spec, key[1], NULL, &pos[ct])) {
+ assert(ct == 0 || pos[0] <= pos[1]);
+ lkey[ct] = key[1];
+ lnspec[ct++] = nextspec[1];
+ }
+ ins_num = ct;
+ break;
+ }
+ case BIT_U32: {
+ __u8 ct = 0;
+ if (!u32_key_exists(spec, key[0], NULL, &pos[0])) {
+ lkey[ct] = key[0];
+ lnspec[ct++] = nextspec[0];
+ }
+ if (ins_num == 2 &&
+ !u32_key_exists(spec, key[1], NULL, &pos[ct])) {
+ assert(ct == 0 || pos[0] <= pos[1]);
+ lkey[ct] = key[1];
+ lnspec[ct++] = nextspec[1];
+ }
+ ins_num = ct;
+ break;
+ }
+ }
+
+ /* ins_num can be 0, 1 or 2 here */
+ *result = hp_alloc(sizeof(**result) +
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *) +
+ PTR_ALIGN_((spec->num + ins_num) *
+ KEYSIZE(spec->bittype)) +
+ (spec->num + ins_num + 1) * sizeof(void *), 1);
+ if (*result == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(*result, spec, sizeof(*spec) +
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *));
+ (*result)->newspec = 0;
+ (*result)->num += ins_num;
+ stat = ptrblock_clone(*termrule(spec), termrule(*result));
+ if (stat < 0) {
+ hp_free(*result);
+ *result = NULL;
+ return stat;
+ }
+
+ first_ksrc = FIRST_KEY(spec);
+ ksrc = first_ksrc;
+ kdst = FIRST_KEY(*result);
+ nsrc = FIRST_NEXTSPEC(spec);
+ ndst = FIRST_NEXTSPEC(*result);
+ for (i = 0; i < ins_num; i++) {
+ ksize = (first_ksrc + pos[i] * KEYSIZE(spec->bittype)) - ksrc;
+ nsize = (ksize / KEYSIZE(spec->bittype)) * sizeof(void *);
+ if (ksize > 0) {
+ memcpy(kdst, ksrc, ksize);
+ memcpy(ndst, nsrc, nsize);
+ }
+ ksrc += ksize;
+ kdst += ksize;
+ nsrc += nsize;
+ ndst += nsize;
+ switch (spec->bittype) {
+ case BIT_U16:
+ *(__u16 *) kdst = lkey[i];
+ break;
+ case BIT_U32:
+ *(__u32 *) kdst = lkey[i];
+ break;
+ }
+ *(struct gen_spec **) ndst = lnspec[i];
+ kdst += KEYSIZE(spec->bittype);
+ ndst += sizeof(void *);
+ }
+ ksize = (spec->num - (ins_num == 0 ? 0 : pos[ins_num - 1])) *
+ KEYSIZE(spec->bittype);
+ assert(ksize > 0);
+ nsize = (ksize / KEYSIZE(spec->bittype)) * sizeof(void *);
+ memcpy(kdst, ksrc, ksize);
+ memcpy(ndst, nsrc, nsize);
+ return HE_OK;
+}
+
+hipac_error
+rlp_delete(const struct rlp_spec *spec, __u8 del_num, const __u32 key[],
+ struct rlp_spec **result)
+{
+ void *first_ksrc, *ksrc, *kdst, *nsrc, *ndst;
+ __u32 i, ksize, nsize;
+ hipac_error stat;
+ __u32 pos[2];
+
+ if (unlikely(spec == NULL || key == NULL || result == NULL ||
+ del_num >= spec->num || !(del_num == 1 || del_num == 2) ||
+ (del_num == 1 &&
+ key[0] >= hipac_maxkey(spec->bittype)) ||
+ (del_num == 2 &&
+ (key[0] >= key[1] ||
+ key[1] >= hipac_maxkey(spec->bittype))))) {
+ ARG_ERR;
+ }
+
+ switch (spec->bittype) {
+ case BIT_U16:
+ if (!u16_key_exists(spec, key[0], NULL, &pos[0])) {
+ ARG_ERR;
+ }
+ if (del_num == 2 &&
+ !u16_key_exists(spec, key[1], NULL, &pos[1])) {
+ ARG_ERR;
+ }
+ break;
+ case BIT_U32:
+ if (!u32_key_exists(spec, key[0], NULL, &pos[0])) {
+ ARG_ERR;
+ }
+ if (del_num == 2 &&
+ !u32_key_exists(spec, key[1], NULL, &pos[1])) {
+ ARG_ERR;
+ }
+ break;
+ }
+
+ *result = hp_alloc(sizeof(**result) +
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *) +
+ PTR_ALIGN_((spec->num - del_num) *
+ KEYSIZE(spec->bittype)) +
+ (spec->num - del_num + 1) * sizeof(void *), 1);
+ if (*result == NULL) {
+ return HE_LOW_MEMORY;
+ }
+ memcpy(*result, spec, sizeof(*spec) +
+ HAS_WILDCARD_SPEC(spec) * sizeof(void *));
+ (*result)->newspec = 0;
+ (*result)->num -= del_num;
+ stat = ptrblock_clone(*termrule(spec), termrule(*result));
+ if (stat < 0) {
+ hp_free(*result);
+ *result = NULL;
+ return stat;
+ }
+
+ first_ksrc = FIRST_KEY(spec);
+ ksrc = first_ksrc;
+ kdst = FIRST_KEY(*result);
+ nsrc = FIRST_NEXTSPEC(spec);
+ ndst = FIRST_NEXTSPEC(*result);
+ for (i = 0; i < del_num; i++) {
+ ksize = (first_ksrc + pos[i] * KEYSIZE(spec->bittype)) - ksrc;
+ nsize = (ksize / KEYSIZE(spec->bittype)) * sizeof(void *);
+ if (ksize > 0) {
+ memcpy(kdst, ksrc, ksize);
+ memcpy(ndst, nsrc, nsize);
+ }
+ ksrc += ksize + KEYSIZE(spec->bittype);
+ kdst += ksize;
+ nsrc += nsize + sizeof(void *);
+ ndst += nsize;
+ }
+ ksize = (spec->num - pos[del_num - 1] - 1) * KEYSIZE(spec->bittype);
+ assert(ksize > 0);
+ nsize = (ksize / KEYSIZE(spec->bittype)) * sizeof(void *);
+ memcpy(kdst, ksrc, ksize);
+ memcpy(ndst, nsrc, nsize);
+ return HE_OK;
+}
+
+hipac_error
+rlp_locate(const struct rlp_spec *spec, struct locate_inf *inf, __u32 key)
+{
+ if (unlikely(spec == NULL || inf == NULL)) {
+ ARG_ERR;
+ }
+
+ switch (spec->bittype) {
+ case BIT_U16:
+ u16_key_exists(spec, key, inf, NULL);
+ break;
+ case BIT_U32:
+ u32_key_exists(spec, key, inf, NULL);
+ break;
+ }
+ return HE_OK;
+}
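
Taken together, rlp.c implements a single sorted-array node: rlp_new() creates one whose last key is always the dimension's maximum key, rlp_insert() and rlp_delete() build a resized copy instead of modifying the node in place, and the locate routines answer "smallest stored key that is >= the search key". The sketch below shows how a caller might drive this API; the dimension id, the gen_spec pointers and the abbreviated error handling are placeholders, since the real callers live in dimtree.c.

/* Illustrative walk through the RLP node API; the pointer arguments and the
 * dimension id are placeholders, real usage is in dimtree.c. */
static void
rlp_example(struct gen_spec *below_100, struct gen_spec *rest)
{
        __u32 keys[2] = { 100, hipac_maxkey(BIT_U32) };
        struct gen_spec *next[2] = { below_100, rest };
        __u32 splitkey[1] = { 50 };
        struct gen_spec *splitnext[1] = { below_100 };
        struct rlp_spec *node, *bigger;
        struct locate_inf inf;

        /* two intervals: values 0..100 -> below_100, 101..max -> rest */
        node = rlp_new(BIT_U32, 0, 2, keys, next);
        if (node == NULL)
                return;

        /* inserting key 50 yields a copy with three intervals
           (0..50, 51..100, 101..max); the original node is untouched */
        if (rlp_insert(node, 1, splitkey, splitnext, &bigger) == HE_OK) {
                rlp_free(node);
                node = bigger;
        }

        /* locate 75: inf.key == 100 and *inf.nextspec == below_100 */
        rlp_locate(node, &inf, 75);
        rlp_free(node);
}
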
diff -urN nf-hipac/kernel/rlp.h nfhipac/kernel/rlp.h
--- nf-hipac/kernel/rlp.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/kernel/rlp.h 2014-11-21 12:36:09.000000000 +0800
@@ -0,0 +1,142 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _RLP_H
+#define _RLP_H
+
+#include "global.h"
+
+
+/* rlp header */
+struct rlp_spec
+{
+ unsigned rlp : 1; // rlp identifier (must be 1)
+ unsigned bittype : 1; // {BIT_U16, BIT_U32}
+ unsigned dimid : 5; // dimension id
+ unsigned newspec : 1; // indicates whether the rlp is contained
+ // in newspec
+ unsigned num : 24; // number of elements in the rlp
+
+#ifdef SINGLE_PATH
+ struct gen_spec * (*locate)(const struct rlp_spec *spec,
+ const void *packet, int *hotdrop);
+#else
+ struct gen_spec * (*locate)(const struct rlp_spec *spec,
+ const void *packet, int *hotdrop,
+ struct gen_spec **nodes, __u8 *nodes_len);
+#endif
+};
+
+/* rlp header test */
+#define IS_RLP(r) (((struct gen_spec *) (r))->rlp)
+
+/* test whether rlp has a wildcard pointer */
+#ifdef SINGLE_PATH
+# define HAS_WILDCARD_SPEC(spec) 0
+# define HAS_WILDCARD_DIM(dimid) 0
+#else
+# define HAS_WILDCARD_SPEC(spec) ((spec)->dimid == 1 || (spec)->dimid == 2)
+# define HAS_WILDCARD_DIM(dimid) ((dimid) == 1 || (dimid) == 2)
+#endif
+
+/* wildcard pointer to the next rlp spec */
+#define WILDCARD(r) ((struct gen_spec **) ((__u8 *) (r) + \
+ sizeof(struct rlp_spec)))
+
+/* key and nextspec pointer found by rlp_locate */
+struct locate_inf
+{
+ __u32 key;
+ struct gen_spec **nextspec;
+};
+
+
+/* return address of termrule pointer */
+struct ptrblock **
+termrule(const struct rlp_spec *spec);
+
+/* return new rlp with ins_num (1 or 2) elements inserted; the elements
+ are (key[i], nextspec[i]) where 0 <= i < ins_num; if ins_num == 2 then
+ key[1] > key[0] */
+struct rlp_spec *
+rlp_new(__u8 bittype, __u8 dimid, __u8 ins_num, const __u32 key[],
+ struct gen_spec *nextspec[]);
+
+/* return the size of the rlp */
+__u32
+rlp_size(const struct rlp_spec *spec);
+
+/* return array of spec->num nextspec pointers;
+ NOTE: this abstraction breaks as soon as the RLP solving data structure does
+ not contain a contiguous array of nextspec pointers */
+struct gen_spec **
+rlp_nextspec(const struct rlp_spec *spec);
+
+static inline void
+rlp_free(struct rlp_spec *spec)
+{
+ struct ptrblock *term;
+
+ if (spec == NULL) {
+ ARG_MSG;
+ return;
+ }
+ term = *termrule(spec);
+ if (term != NULL) {
+ ptrblock_free(term);
+ }
+ hp_free(spec);
+}
+
+static inline int
+rlp_spec_eq(const struct rlp_spec *spec1, const struct rlp_spec *spec2)
+{
+ if (spec1 == NULL || spec2 == NULL || !IS_RLP(spec1) ||
+ !IS_RLP(spec2)) {
+ ARG_MSG;
+ return 0;
+ }
+ return spec1->bittype == spec2->bittype &&
+ spec1->dimid == spec2->dimid &&
+ spec1->num == spec2->num;
+}
+
+/* clone rlp (not recursively);
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+rlp_clone(const struct rlp_spec *spec, struct rlp_spec **clone);
+
+/* insert (key[i], nextspec[i]) into the rlp where 0 <= i < ins_num <= 2
+ and store the new rlp in result; if ins_num == 2 then key[1] must
+ be > key[0];
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+rlp_insert(const struct rlp_spec *spec, __u8 ins_num, const __u32 key[],
+ struct gen_spec *nextspec[], struct rlp_spec **result);
+
+/* delete (key[i], nextspec[i]) from the rlp where 0 <= i < del_num <= 2
+ and nextspec[i] is associated with key[i] and store the new rlp in
+ result; if del_num == 2 then key[1] must be > key[0];
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+rlp_delete(const struct rlp_spec *spec, __u8 del_num, const __u32 key[],
+ struct rlp_spec **result);
+
+/* return (key', nextspec) where key' = min{k : k >= key} and nextspec
+ is associated with key';
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+rlp_locate(const struct rlp_spec *spec, struct locate_inf *inf, __u32 key);
+
+#endif
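
The header above fixes the node layout: the bit-field header, an optional wildcard pointer, the pointer-aligned key array, one nextspec pointer per key, and a trailing termrule pointer; rlp_size() simply adds those pieces up. The standalone sketch below reproduces that arithmetic; the 16-byte header is an assumption for a 64-bit build (the 32 bits of bit-fields padded up plus one 8-byte function pointer), not something the patch itself states.

/* Standalone sketch of the rlp_size() arithmetic; HDR and PTR are assumed
 * values for a 64-bit build and are not taken from the patch. */
#include <stdio.h>

#define PTR            8                    /* sizeof(void *)             */
#define HDR            16                   /* assumed sizeof(rlp_spec)   */
#define KEYSIZE(bt)    (1 << ((bt) + 1))    /* BIT_U16 -> 2, BIT_U32 -> 4 */
#define PTR_ALIGN_(v)  (((v) + PTR - 1) & ~(PTR - 1))

static unsigned long
rlp_size_sketch(int bittype, int has_wildcard, unsigned num)
{
        return HDR + has_wildcard * PTR +
               PTR_ALIGN_(num * KEYSIZE(bittype)) +
               (num + 1) * PTR;             /* num nextspecs + termrule */
}

int main(void)
{
        /* BIT_U32 node, 2 keys, no wildcard: 16 + 8 + 24 = 48 bytes */
        printf("%lu\n", rlp_size_sketch(1, 0, 2));
        return 0;
}
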
diff -urN nf-hipac/Makefile nfhipac/Makefile
--- nf-hipac/Makefile 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/Makefile 2014-11-21 11:38:54.000000000 +0800
@@ -0,0 +1,15 @@
+
+
+all:
+ $(MAKE) -C user
+ $(MAKE) -C kernel
+
+clean:
+ $(MAKE) -C user clean
+ $(MAKE) -C kernel clean
+
+install: all
+ $(MAKE) -C user install
+ @cp kernel/nf_hipac.ko /lib/modules/`uname -r`/kernel/net/ipv4/netfilter/
+ @depmod -a
+
diff -urN nf-hipac/README nfhipac/README
--- nf-hipac/README 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/README 2014-11-21 12:56:27.000000000 +0800
@@ -0,0 +1,5 @@
+
+This package comes from http://www.hipac.org/; I only did the porting work (keeping the API compatible). Many thanks and sincere respect to the original authors.
+
+Because the original authors stopped updating it in 2005, the use and adoption of nf-hipac suffered. Since the algorithm is both highly valuable and very usable (simple and easy to use), modularizing it is genuinely worthwhile.
+The code has currently been tested on kernels 2.6.32 and 3.9.6. 2.6.13 is no longer supported. If you need hipac on older kernels, please follow the instructions at http://www.hipac.org/.
diff -urN nf-hipac/user/doit.sh nfhipac/user/doit.sh
--- nf-hipac/user/doit.sh 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/doit.sh 2014-11-21 11:09:14.000000000 +0800
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+./nf-hipac -A INPUT -s 1.1.1.1 -j DROP
diff -urN nf-hipac/user/hipac.h nfhipac/user/hipac.h
--- nf-hipac/user/hipac.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/hipac.h 2014-11-21 11:09:14.000000000 +0800
@@ -0,0 +1,623 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _HIPAC_H
+#define _HIPAC_H
+
+#include "mode.h"
+
+/* values of bittype in specification header */
+#define BIT_U16 0
+#define BIT_U32 1
+
+/* maximum length of a hipac chain name (including terminating '\0') */
+#define HIPAC_CHAIN_NAME_MAX_LEN 32
+
+/* representation of the match [left, right] associated with a dimension id;
+ [left, right] must not be a wildcard match */
+struct hipac_match
+{
+ unsigned dimid : 5;
+ unsigned invert : 1;
+ __u32 left;
+ __u32 right;
+ char next_match[0];
+};
+
+struct hipac_rule
+{
+ __u32 pos;
+ char cmp_start[0];
+ __u32 size;
+ __u32 origin;
+ __u8 action;
+ __u8 native_mct;
+ __u16 match_offset;
+ __u32 target_offset;
+ struct hipac_match first_match[0];
+};
+
+struct hipac_chain_info
+{
+ char *label;
+ __u8 policy;
+ __u8 is_internal_chain;
+ __u32 rule_num;
+ struct list_head *chain_head;
+};
+
+
+
+/* return values of function based match executor */
+typedef enum
+{
+ MATCH_YES,
+ MATCH_NO,
+ MATCH_HOTDROP
+} hipac_match_t;
+
+
+/* hipac_rule action value; TARGET_DUMMY is reserved for internal usage only;
+ the function based target executor may return TARGET_ACCEPT, TARGET_DROP
+ or TARGET_NONE */
+typedef enum
+{
+ TARGET_DROP = NF_DROP,
+ TARGET_ACCEPT = NF_ACCEPT,
+ TARGET_NONE = (NF_ACCEPT > NF_DROP ? NF_ACCEPT + 1 : NF_DROP + 1),
+ TARGET_RETURN,
+ TARGET_DUMMY,
+ TARGET_EXEC,
+ TARGET_CHAIN
+} hipac_target_t;
+
+
+/* function based match and target executor function types */
+typedef hipac_match_t (* hipac_match_exec_t) (const void *packet,
+ void *first_match, void *end);
+typedef hipac_target_t (* hipac_target_exec_t) (const void *packet,
+ void *target);
+
+
+/* dimension extractor function type */
+typedef __u32 (* hipac_extract_t) (const void *packet, int *hotdrop);
+
+
+/* equality function type */
+typedef int (* hipac_eq_exec_t) (const struct hipac_rule *r1,
+ const struct hipac_rule *r2);
+
+
+/* constructor/destructor function type */
+typedef void (* hipac_copy_constructor_t) (const struct hipac_rule *r_org,
+ struct hipac_rule *r_new);
+typedef void (* hipac_destroy_exec_t) (struct hipac_rule *r);
+
+
+/* hipac error codes */
+typedef enum
+{
+ HE_OK = 0,
+ HE_IMPOSSIBLE_CONDITION = -1,
+ HE_LOW_MEMORY = -2,
+ HE_CHAIN_EXISTS = -3,
+ HE_CHAIN_NOT_EXISTENT = -4,
+ HE_CHAIN_IS_EMPTY = -5,
+ HE_CHAIN_NOT_EMPTY = -6,
+ HE_CHAIN_IS_USERDEFINED = -7,
+ HE_CHAIN_IS_CONNECTED = -8,
+ HE_CHAIN_IS_REFERENCED = -9,
+ HE_CHAIN_NOT_NATIVE = -10,
+ HE_CHAIN_IS_NATIVE = -11,
+ HE_RULE_NOT_EXISTENT = -12,
+ HE_RULE_ORIGIN_MISMATCH = -13,
+ HE_RULE_PREFIX_MISMATCH = -14,
+ HE_LOOP_DETECTED = -15,
+ HE_REC_LIMIT = -16,
+ HE_TARGET_CHAIN_NOT_EXISTENT = -17,
+ HE_TARGET_CHAIN_IS_NATIVE = -18,
+ HE_NATIVE_CHAIN_EXISTS = -19,
+ HE_NEXT_ERROR = -100 // shouldn't be changed
+} hipac_error;
+
+
+
+/* return maximum key of a dimension with the given bittype */
+static inline __u32
+hipac_maxkey(__u8 bittype)
+{
+ if (bittype == BIT_U16)
+ return 0xffff;
+ return 0xffffffff;
+}
+
+
+/* init hipac data structures;
+ MUST be called once at the beginning in order to let the other
+ operations work properly!
+ dimid_to_bittype: assigns dimids to bit types.
+ i-th element of the array contains the bit type
+ of dimension id i
+ extract: functions to extract certain fields from a packet.
+ the function at position i of the array returns
+ the entry in a packet that corresponds to
+ dimension id i (i.e. the source ip of the packet)
+ len: length of the dim2btype and extract array
+ copycon: constructor function
+ destroy: destructor function
+ match: match executor function
+ target: target executor function
+ eq: equality function to compare rules
+ maxmem: maximum allowed memory consumption
+ possible errors: HE_LOW_MEMORY, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_init(const __u8 dimid_to_bittype[], const hipac_extract_t extract[],
+ const __u8 len, hipac_copy_constructor_t copycon,
+ hipac_destroy_exec_t destroy, hipac_match_exec_t match,
+ hipac_target_exec_t target, hipac_eq_exec_t eq,
+ const __u64 maxmem);
+
+
+/* free all hipac data structures;
+ MUST be called once in the end
+ attention: make sure there are no external accesses to hipac
+ data structures taking place anymore! */
+void
+hipac_exit(void);
+
+
+/* return new hipac data structure
+ name: name of the public chain
+ name_intern: name of the internal dimtree chain
+ policy: initial policy
+ origin: bitvector uniq to this data structure
+ hipac: pointer to a pointer to the resulting hipac data
+ structure. use as first argument to hipac_match()
+ possible errors: HE_LOW_MEMORY, HE_NATIVE_CHAIN_EXISTS,
+ HE_CHAIN_EXISTS, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_new(const char *name, const char* name_intern, const __u8 policy,
+ const __u32 origin, void **hipac);
+
+
+/* set maximum amount of memory the hipac data structures are
+ allowed to occupy. return LOW_MEMORY if 'mem' is lower than
+ currently allocated memory
+ possible errors: HE_LOW_MEMORY */
+hipac_error
+hipac_set_maxmem(const __u64 mem);
+
+
+/* get maximum amount of memory the hipac data structures are
+ allowed to occupy. */
+__u64
+hipac_get_maxmem(void);
+
+
+/* set policy of chain with name 'name' to 'policy'.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_USERDEFINED,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_set_policy(const char *name, const __u8 policy);
+
+
+/* get policy of chain with name 'name' and write it to 'result'.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_USERDEFINED,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_policy(const char *name, __u8 *result);
+
+
+/* create new user-defined chain with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_new_chain(const char* name);
+
+
+/* delete all rules in chain with name 'name'.
+ if 'name' is NULL all rules in all chains are deleted
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_flush_chain(const char *name);
+
+
+/* delete user-defined chain with name 'name'.
+ if 'name' is NULL delete all chains that are empty
+ and not referenced from other chains.
+ possible errors: HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_NATIVE,
+ HE_CHAIN_NOT_EMPTY, HE_CHAIN_IS_REFERENCED */
+hipac_error
+hipac_delete_chain(const char *name);
+
+
+/* rename chain with name 'name' to 'new_name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_EXISTS,
+ HE_CHAIN_NOT_EXISTENT, HE_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_rename_chain(const char *name, const char *new_name);
+
+
+/* get an array of hipac_chain_info structs containing required infos
+ for a rule listing of chain with name 'name'. if 'name' is NULL
+ return infos for all chains. 'len' specifies the length of the
+ returned struct hipac_chain_info array.
+ attention: don't forget to free the struct hipac_chain_info array
+ after the rule listing via hipac_free_chain_infos()!
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_chain_infos(const char *name, struct hipac_chain_info **inf,
+ __u32 *len);
+
+
+/* free array of hipac_chain_info structs that has been allocated
+ before via hipac_get_chain_infos().
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_free_chain_infos(struct hipac_chain_info *inf);
+
+
+/* get next hipac_rule 'next' of previous hipac_rule 'prev'.
+ with this function you can walk over the chain during rule listing.
+ to get the first hipac_rule of a chain, set 'prev' to NULL.
+ when the end of the chain is reached or the chain is empty the
+ hipac_error HE_RULE_NOT_EXISTENT is returned.
+ attention: during rule listing of a chain hipac_get_next_rule()
+ must always be called until finally HE_RULE_NOT_EXISTENT
+ is returned!
+ possible errors: HE_LOW_MEMORY, HE_RULE_NOT_EXISTENT,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_next_rule(const struct hipac_chain_info *inf,
+ struct hipac_rule *prev,
+ struct hipac_rule **next);
+
+
+/* append hipac_rule 'rule' to chain with name 'name'.
+ 'rule->pos' is set to the position of the last rule
+ in the chain + 1.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_append(const char *name, const struct hipac_rule *rule);
+
+
+/* insert hipac_rule 'rule' at position 'rule->pos' into chain
+ with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_LOOP_DETECTED, HE_REC_LIMIT,
+ HE_RULE_ORIGIN_MISMATCH, HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_insert(const char *name, const struct hipac_rule *rule);
+
+
+/* delete hipac_rule with position 'pos' from chain with name 'name'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_delete_pos(const char *name, const __u32 pos);
+
+
+/* find the first rule in chain with name 'name' that equals to
+ hipac_rule 'rule' and delete it.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_delete(const char *name, const struct hipac_rule *rule);
+
+
+/* replace rule with position 'rule->pos' in chain with name 'name'
+ with hipac_rule 'rule'.
+ possible errors: HE_LOW_MEMORY, HE_CHAIN_NOT_EXISTENT,
+ HE_RULE_NOT_EXISTENT, HE_LOOP_DETECTED,
+ HE_REC_LIMIT, HE_RULE_ORIGIN_MISMATCH,
+ HE_RULE_PREFIX_MISMATCH,
+ HE_TARGET_CHAIN_NOT_EXISTENT,
+ HE_TARGET_CHAIN_IS_NATIVE,
+ HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_replace(const char *name, const struct hipac_rule *rule);
+
+
+/* match packet and return the terminal packet action which is either
+ TARGET_ACCEPT or TARGET_DROP; note that this is the only function
+ that may be used in parallel with other functions of the hipac API */
+hipac_target_t
+hipac_match(void *hipac, const void *packet);
+
+
+
+/*
+ * hipac statistics: data structures
+ */
+
+/* rlp statistics
+ total_mem_tight: current overall memory consumption in bytes
+ in terms of how much has been requested
+ total_mem_real: current overall memory consumption in bytes
+ in terms of how much has actually been
+ allocated
+ rlp_mem_tight: current memory consumption in bytes of all
+ rlps (not including termrule blocks) in
+ terms of how much has been requested
+ rlp_mem_real: current memory consumption in bytes of all
+ rlps (not including termrule blocks) in
+ terms of how much has actually been
+ allocated
+ termrule_mem_tight: current memory consumption in bytes of all
+ termrule blocks in terms of how much has
+ been requested
+ termrule_mem_real: current memory consumption in bytes of all
+ termrule blocks in terms of how much has
+ actually been allocated
+ rlp_num: number of rlps
+ rlp_dimid_num: mapping with [i] containing the number of
+ rlps in dimension i
+ rlp_depth_num: mapping with [i] containing the number of
+ rlps in depth i
+ termrule_num: number of termrule blocks
+ termrule_ptr_num: number of entries in all termrule blocks
+ keys_num: number of keys in all rlps
+ rlp_dimid_keys_stat: array of distributions with [i][j]
+ containing the number of rlps in
+ dimension i with 2^(j - 1) <= keys < 2^j
+ termptr_num: number of terminal pointers (of all rlps)
+ termptr_dimid_num: mapping with [i] containing the number of
+ terminal pointers in dimension i
+ termptr_depth_num: mapping with [i] containing the number of
+ terminal pointers in depth i
+ nontermptr_num: number of non-terminal pointers (of all
+ rlps)
+ nontermptr_dimid_num: mapping with [i] containing the number of
+ non-terminal pointers in dimension i
+ nontermptr_depth_num: mapping with [i] containing the number of
+ non-terminal pointers in depth i
+ dt_elem_num: number of elementary interval structures
+ dt_elem_ptr_num: number of rules in all elementary interval
+ structures
+ dt_elem_stat: distribution with [i] containing the number
+ of elementary interval structures with
+ 2^(i - 1) <= rules < 2^i */
+struct hipac_rlp_stat
+{
+ __u64 total_mem_tight;
+ __u64 total_mem_real;
+ __u64 rlp_mem_tight;
+ __u64 rlp_mem_real;
+ __u64 termrule_mem_tight;
+ __u64 termrule_mem_real;
+ __u32 rlp_num;
+ __u32 rlp_dimid_num[16];
+ __u32 rlp_depth_num[16];
+ __u32 termrule_num;
+ __u32 termrule_ptr_num;
+ __u32 keys_num;
+ __u32 rlp_dimid_keys_stat[16][18];
+ __u32 termptr_num;
+ __u32 termptr_dimid_num[16];
+ __u32 termptr_depth_num[16];
+ __u32 nontermptr_num;
+ __u32 nontermptr_dimid_num[16];
+ __u32 nontermptr_depth_num[16];
+ __u32 dt_elem_num;
+ __u32 dt_elem_ptr_num;
+ __u32 dt_elem_stat[16];
+};
+
+/* dimtree statistics
+ chain_mem_tight: current memory consumption in bytes of
+ a dimtree chain including the rules in
+ terms of how much has been requested
+ chain_mem_real: current memory consumption in bytes of
+ a dimtree chain including the rules in
+ terms of how much has actually been
+ allocated
+ rule_num: number of dimtree rules
+ rules_with_exec_matches: number of dimtree rules containing at
+ least one function based match
+ rules_with_exec_target: number of dimtree rules containing
+ a function based target
+ rules_same_pos_stat: distribution with [i] containing number
+ of dimtree rule series of length
+ >= 2^(i - 1) and < 2^i where all rules
+ share the same position
+ dt_match_stat: mapping with [i] containing the number
+ of dimtree rules having i non-wildcard
+ matches */
+struct hipac_dimtree_stat
+{
+ __u64 chain_mem_tight;
+ __u64 chain_mem_real;
+ __u32 rule_num;
+ __u32 rules_with_exec_matches;
+ __u32 rules_with_exec_target;
+ __u32 rules_same_pos_stat[16];
+ __u32 dt_match_stat[16];
+};
+
+/* hipac memory statistics
+ total_mem_tight: current overall memory consumption in
+ bytes in terms of how much has been
+ requested
+ total_mem_real: current overall memory consumption in
+ bytes in terms of how much has
+ actually been allocated
+ memhash_elem_num: number of objects for which memory
+ has been requested
+ memhash_len: number of buckets in the memory hash
+ memhash_smallest_bucket_len: number of objects in the smallest
+ bucket of the memory hash
+ memhash_biggest_bucket_len: number of objects in the biggest
+ bucket of the memory hash
+ memhash_bucket_stat: distribution with [i] containing the
+ number of buckets with
+ 2^(i - 1) <= objects < 2^i */
+struct hipac_mem_stat
+{
+ __u64 total_mem_tight;
+ __u64 total_mem_real;
+ __u32 memhash_elem_num;
+ __u32 memhash_len;
+ __u32 memhash_smallest_bucket_len;
+ __u32 memhash_biggest_bucket_len;
+ __u32 memhash_bucket_stat[16];
+
+};
+
+
+/* hipac chain statistics
+ mem_tight: current memory consumption in bytes of all
+ hipac chains including the rules in terms of
+ how much has been requested
+ mem_real: current memory consumption in bytes of all
+ hipac chains including the rules in terms of
+ how much has actually been allocated
+ chain_num: number of chains
+ rule_num: number of rules in all chains
+ prefix_stat: distribution with [i] containing the number of
+ chains with 2^(i - 1) <= prefixes < 2^i
+ incoming_stat: distribution with [i] containing the number of
+ chains with 2^(i - 1) <= incoming edges < 2^i
+ outgoing_stat: distribution with [i] containing the number of
+ chains with 2^(i - 1) <= outgoing edges < 2^i */
+struct hipac_chain_stat
+{
+ __u64 mem_tight;
+ __u64 mem_real;
+ __u32 chain_num;
+ __u32 rule_num;
+ __u32 prefix_stat[16];
+ __u32 incoming_stat[16];
+ __u32 outgoing_stat[16];
+};
+
+
+/* hipac rule statistics
+ rule_num: number of rules
+ exec_match_num: number of rules with exec_matches
+ exec_target_num: number of rules with exec_target
+ jump_target_num: number of rules with jump target
+ return_target_num: number of rules with return target
+ hipac_match_stat: mapping with [i] containing the number
+ of rules with i hipac_matches
+ inv_rules_stat: mapping with [i] containing the number
+ of rules with i inversion flags */
+struct hipac_rule_stat
+{
+ __u32 rule_num;
+ __u32 exec_match_num;
+ __u32 exec_target_num;
+ __u32 jump_target_num;
+ __u32 return_target_num;
+ __u32 hipac_match_stat[16];
+ __u32 inv_rules_stat[16];
+};
+
+
+/* hipac user statistics
+ total_mem_tight: current memory consumption in bytes in terms
+ of how much has been requested
+ total_mem_real: current memory consumption in bytes in terms
+ of how much has actually been allocated
+ chain_num: number of chains
+ rule_num: number of rules in all chains */
+struct hipac_user_stat
+{
+ __u64 total_mem_tight;
+ __u64 total_mem_real;
+ __u32 chain_num;
+ __u32 rule_num;
+};
+
+
+
+/*
+ * hipac statistics: functions
+ */
+
+/* get rlp statistics, i.e. the statistics of the internal
+ rlp representation of all rules reachable from the root chain
+ represented by the 'hipac' pointer
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_rlp_stat(void *hipac, struct hipac_rlp_stat *stat);
+
+
+/* get dimtree statistics, i.e. the statistics of the internal
+ chain representation of all rules reachable from the root chain
+ represented by the 'hipac' pointer
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_dimtree_stat(void *hipac, struct hipac_dimtree_stat *stat);
+
+
+/* get hipac memory statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_mem_stat(struct hipac_mem_stat *stat);
+
+
+/* get hipac chain statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_chain_stat(struct hipac_chain_stat *stat);
+
+
+/* get hipac rule statistics
+ returned statistics contains all rules of those chains that are
+ reachable from the root chain represented by the 'hipac' pointer
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_rule_stat(void *hipac, struct hipac_rule_stat *stat);
+
+
+/* get hipac user statistics
+ possible errors: HE_IMPOSSIBLE_CONDITION */
+hipac_error
+hipac_get_user_stat(struct hipac_user_stat *stat);
+
+#ifdef DEBUG
+/* per object debugging: selection is done by an externally defined variable
+ hipac_debug which is a bit vector of DEBUG_* */
+# define DEBUG_HIPAC 0x01
+# define DEBUG_DIMTREE 0x02
+# define DEBUG_RLP 0x04
+# define DEBUG_IHASH 0x08
+# define DEBUG_GLOBAL 0x10
+ extern unsigned hipac_debug;
+
+hipac_error
+hipac_get_dt_rule_ptrs(const char *name, const __u32 pos, void **res);
+
+__u8
+dt_rules_have_same_position(void *hipac, void *dt_start, void *dt_rule);
+#endif
+
+#endif
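
Nearly every *_stat array declared above uses the same logarithmic bucketing that print_dist() renders in the proc files: index 0 counts values equal to 0, index i >= 1 counts values v with 2^(i-1) <= v < 2^i, and the last index is open-ended. The helper below merely illustrates that convention; the in-kernel accounting lives in hipac.c, which is not part of this excerpt.

/* Illustration of the bucket convention shared by the *_stat arrays above:
 * bucket 0 holds zeros, bucket i (i >= 1) holds values v with
 * 2^(i-1) <= v < 2^i, and the last bucket is open-ended. */
#include <stdio.h>

static unsigned
stat_bucket(unsigned v, unsigned nbuckets)
{
        unsigned i = 0;

        while (v) {                 /* i becomes the bit length of v */
                v >>= 1;
                i++;
        }
        return i < nbuckets ? i : nbuckets - 1;
}

int main(void)
{
        /* 0 -> bucket 0, 1 -> 1, 3 -> 2, 7 -> 3 (with 16 buckets) */
        printf("%u %u %u %u\n",
               stat_bucket(0, 16), stat_bucket(1, 16),
               stat_bucket(3, 16), stat_bucket(7, 16));
        return 0;
}
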
diff -urN nf-hipac/user/libnfhipac.c nfhipac/user/libnfhipac.c
--- nf-hipac/user/libnfhipac.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/libnfhipac.c 2014-11-21 11:09:14.000000000 +0800
@@ -0,0 +1,399 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/select.h>
+#include <sys/wait.h>
+#include <linux/netlink.h>
+#include "libnfhipac.h"
+#include "hipac.h"
+#include "nfhp_com.h"
+
+#ifndef PROC_SYS_MODPROBE
+# define PROC_SYS_MODPROBE "/proc/sys/kernel/modprobe"
+#endif
+
+#define MIN(a, b) ((a) <= (b) ? (a) : (b))
+#define IS_LIST_CMD(cmd) ((cmd) == CMD_LIST)
+#define CMD_SIZE 4096
+#define RECV_SIZE 65536
+
+struct nfhp_cmd *
+nlhp_new_cmd(int cmd, int *size)
+{
+ struct nlmsghdr *msg;
+ struct nfhp_cmd *c;
+
+ if (cmd > CMD_MAX) {
+ return NULL;
+ }
+
+ msg = calloc(1, CMD_SIZE);
+ if (msg == NULL) {
+ return NULL;
+ }
+ if (size != NULL){
+ *size = CMD_SIZE - NLMSG_LENGTH(0);
+ }
+ c = NLMSG_DATA(msg);
+ c->cmd = cmd;
+ return c;
+}
+
+void
+nlhp_free_cmd(struct nfhp_cmd *cmd)
+{
+ if (cmd == NULL) {
+ return;
+ }
+ free((char *) cmd - NLMSG_LENGTH(0));
+}
+
+struct nfhp_cmd *
+nlhp_copy_cmd(struct nfhp_cmd *cmd, int size)
+{
+ struct nlmsghdr *clone;
+
+ if (cmd == NULL || size < sizeof(*cmd)) {
+ return NULL;
+ }
+ clone = malloc(NLMSG_SPACE(size));
+ if (clone == NULL) {
+ return NULL;
+ }
+ memcpy(clone, (char *) cmd - NLMSG_LENGTH(0), NLMSG_SPACE(size));
+ return NLMSG_DATA(clone);
+}
+
+/* read location of modprobe binary from procfs */
+static char *
+get_modprobe_cmd(void)
+{
+ int procfile, len;
+ char *ret;
+
+ procfile = open(PROC_SYS_MODPROBE, O_RDONLY);
+ if (procfile < 0) {
+ return NULL;
+ }
+
+ ret = malloc(1024);
+ if (ret == NULL) {
+ goto error;
+ }
+ switch (read(procfile, ret, 1024)) {
+ case -1: goto error;
+ case 1024: goto error;
+ }
+ len = strlen(ret);
+ if (ret[len - 1] == '\n') {
+ ret[len - 1] = '\0';
+ }
+ close(procfile);
+ return ret;
+
+ error:
+ free(ret);
+ close(procfile);
+ return NULL;
+}
+
+/* try to load module specified by modname */
+static int
+modprobe(char *modname)
+{
+ char *modprobe_cmd = NULL;
+ char *argv[3];
+
+ modprobe_cmd = get_modprobe_cmd();
+ if (modprobe_cmd == NULL) {
+ return -1;
+ }
+ switch (fork()) {
+ case 0:
+ argv[0] = modprobe_cmd;
+ argv[1] = modname;
+ argv[2] = NULL;
+ execv(argv[0], argv);
+ /* usually not reached */
+ exit(0);
+
+ case -1:
+ return -1;
+
+ default: /* parent */
+ wait(NULL);
+ }
+ free(modprobe_cmd);
+ return 0;
+}
+
+static inline int
+rcv_kern(int sock, void *buf, int len, struct sockaddr_nl *addr,
+ socklen_t *addrlen)
+{
+ fd_set set;
+ int stat;
+
+ FD_SET(sock, &set);
+ stat = select(sock + 1, &set, NULL, NULL, NULL);
+ FD_ZERO(&set);
+ if (stat == 0) {
+ return -2;
+ } else if (stat == -1) {
+ return -1;
+ }
+ return recvfrom(sock, buf, len, 0, (struct sockaddr *) addr, addrlen);
+}
+
+static inline int
+check_reply(struct nlmsghdr *msg, int size, int min_size)
+{
+ int pl;
+
+ if (size < 0) {
+ return size;
+ }
+ if (size <= NLMSG_LENGTH(0)) {
+ fprintf(stderr, "internal error: reply has empty payload\n");
+ return -5;
+ }
+ pl = NLMSG_PAYLOAD(msg, 0);
+ if (pl < min_size) {
+ fprintf(stderr, "internal error: payload too short\n");
+ return -5;
+ }
+ if (!NLMSG_OK(msg, size)) {
+ fprintf(stderr, "internal error: reply broken\n");
+ return -5;
+ }
+ return pl;
+}
+
+int
+nlhp_send_cmd(struct nfhp_cmd *cmd, int cmd_size,
+ int (*process_list) (const void *data, int len),
+ int *err)
+{
+ int rcvbuf_size;
+ struct sockaddr_nl addr = {0};
+ socklen_t addrlen = sizeof(addr);
+ struct nlmsghdr *msg, *reply;
+ int sock, stat, pl;
+ void *data;
+
+ if (cmd == NULL || err == NULL || cmd_size < sizeof(*cmd) ||
+ (IS_LIST_CMD(cmd->cmd) && process_list == NULL)) {
+ return -5;
+ }
+
+ stat = 0;
+ *err = 0;
+
+ /* open socket */
+ if ((sock = socket(PF_NETLINK, SOCK_RAW, NLHP_PROTO)) == -1) {
+ return -1;
+ }
+ addr.nl_family = AF_NETLINK;
+ if (bind(sock, (struct sockaddr *) &addr, addrlen) == -1) {
+ stat = -1;
+ goto end;
+ }
+
+ /* fill netlink header */
+ msg = (struct nlmsghdr *) ((char *) cmd - NLMSG_LENGTH(0));
+ msg->nlmsg_len = NLMSG_LENGTH(cmd_size);
+ msg->nlmsg_type = NLHP_TYPE;
+ msg->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ msg->nlmsg_pid = getpid();
+ if (!NLMSG_OK(msg, NLMSG_LENGTH(cmd_size))) {
+		fprintf(stderr, "internal error: message build failed\n");
+ stat = -5;
+ goto end;
+ }
+
+ /* send request */
+ if (sendto(sock, msg, NLMSG_SPACE(cmd_size), 0,
+ (struct sockaddr *) &addr, addrlen) == -1) {
+ modprobe("nf_hipac");
+ if (sendto(sock, msg, NLMSG_SPACE(cmd_size),
+ 0, (struct sockaddr *) &addr, addrlen) == -1) {
+ stat = -1;
+ goto end;
+ }
+ }
+
+ if (!IS_LIST_CMD(cmd->cmd))
+ rcvbuf_size = NLMSG_SPACE(sizeof(int32_t));
+ else rcvbuf_size = RECV_SIZE;
+
+ /* receive reply */
+ reply = malloc(rcvbuf_size);
+ if (reply == NULL) {
+ stat = -3;
+ goto end;
+ }
+
+ /* non-list command */
+ if (!IS_LIST_CMD(cmd->cmd)) {
+ int size;
+ size = rcv_kern(sock, reply, rcvbuf_size, &addr, &addrlen);
+ pl = check_reply(reply, size, sizeof(int32_t));
+ if (pl < 0) {
+ stat = pl;
+ goto end_free;
+ }
+ if (reply->nlmsg_type == NLMSG_ERROR) {
+ *err = *(int32_t *) NLMSG_DATA(reply);
+ goto end_free;
+ } else {
+ fprintf(stderr, "internal error: netlink error\n");
+ stat = -5;
+ goto end_free;
+ }
+ } else {
+ /* list callback */
+ while (1) {
+ int size;
+ size = rcv_kern(sock, reply, rcvbuf_size,
+ &addr, &addrlen);
+ data = NLMSG_DATA(reply);
+ pl = check_reply(reply, size, sizeof(int32_t));
+ if (pl < 0) {
+ stat = pl;
+ break;
+ }
+ if (pl == sizeof(int32_t) &&
+ (reply->nlmsg_type == NLMSG_DONE ||
+ reply->nlmsg_type == NLMSG_ERROR)) {
+ if (*(int32_t *) data < 0) {
+ /* listing ends abnormally */
+ *err = *(int32_t *) data;
+ } else {
+ *err = 0;
+ }
+ break;
+ }
+ if (pl < MIN(sizeof(struct nfhp_list_chain),
+ sizeof(struct nfhp_list_rule))) {
+ fprintf(stderr, "internal error: "
+ "payload too short\n");
+ stat = -5;
+ break;
+ }
+ if (process_list(data, pl) < 0) {
+ stat = -4;
+ break;
+ }
+ if (reply->nlmsg_type == NLMSG_DONE) {
+ *err = 0;
+ break;
+ }
+ }
+ }
+
+ end_free:
+ free(reply);
+ end:
+ close(sock);
+ return stat;
+}
+
+const char *
+nlhp_error(int err)
+{
+ static const char *errmsg[] =
+ {
+ /* hipac_error */
+ [-HE_OK] = "Operation successful",
+ [-HE_IMPOSSIBLE_CONDITION] = "Impossible condition (e.g. illegal function call)",
+ [-HE_LOW_MEMORY] = "Not enough memory available",
+ [-HE_CHAIN_EXISTS] = "Chain exists already",
+ [-HE_CHAIN_NOT_EXISTENT] = "Chain does not exist",
+ [-HE_CHAIN_IS_EMPTY] = "Chain is empty",
+ [-HE_CHAIN_NOT_EMPTY] = "Chain is not empty",
+ [-HE_CHAIN_IS_USERDEFINED] = "Chain is user defined",
+ [-HE_CHAIN_IS_CONNECTED] = "Chain is connected",
+ [-HE_CHAIN_IS_REFERENCED] = "Chain is still referenced",
+ [-HE_CHAIN_NOT_NATIVE] = "Chain is not native",
+ [-HE_CHAIN_IS_NATIVE] = "Chain is native",
+ [-HE_RULE_NOT_EXISTENT] = "Rule does not exist",
+ [-HE_RULE_ORIGIN_MISMATCH] = "Rule origin mismatch",
+ [-HE_RULE_PREFIX_MISMATCH] = "Rule prefix mismatch",
+ [-HE_LOOP_DETECTED] = "Loop detected",
+ [-HE_REC_LIMIT] = "Chain depth limit reached",
+ [-HE_TARGET_CHAIN_NOT_EXISTENT] = "Target chain does not exist",
+ [-HE_TARGET_CHAIN_IS_NATIVE] = "Target chain is native",
+ [-HE_NATIVE_CHAIN_EXISTS] = "Native chain exists",
+
+ /* nfhipac_error */
+ [-NFHE_INDEX] = "Unable to retrieve unused ifindex",
+ [-NFHE_NOMSG] = "Incorrect message format",
+ [-NFHE_CMD] = "Invalid command",
+ [-NFHE_LABEL] = "Empty chain label",
+ [-NFHE_NLABEL] = "Empty new chain label",
+ [-NFHE_POLICY] = "Invalid policy",
+ [-NFHE_ACTION] = "Invalid action",
+ [-NFHE_NMCT] = "Invalid native match count",
+ [-NFHE_IEOFF] = "Invalid target_offset/next_offset in ipt_entry",
+ [-NFHE_SORT] = "Native matches not sorted or dimid duplicate",
+ [-NFHE_MINT] = "Invalid interval in native match",
+ [-NFHE_DEVA] = "Native interface match but no corresponding interface name",
+ [-NFHE_DEVB] = "Interface name but no corresponding native interface match",
+ [-NFHE_FRAG] = "Invalid fragment match",
+ [-NFHE_PROTO] = "Invalid protocol match",
+ [-NFHE_SYN] = "Invalid syn match",
+ [-NFHE_STATE] = "Invalid state match",
+ [-NFHE_TCP] = "tcp match dependency failure",
+ [-NFHE_TCPUDP] = "tcp or udp match dependency failure",
+ [-NFHE_ICMP] = "icmp match dependency failure",
+ [-NFHE_CMPMIS] = "Missing cmp_len array",
+ [-NFHE_CMPSH] = "cmp_len array too short",
+ [-NFHE_CMPLA] = "cmp_len array contains a value larger than the corresponding ipt match/target size",
+ [-NFHE_ORIGIN] = "Illegal combination of matches (no valid origin)",
+ [-NFHE_IPTMSZ] = "Invalid ipt match size",
+ [-NFHE_IPTMCH] = "checkentry fails for ipt match",
+ [-NFHE_IPTTMI] = "Missing ipt target",
+ [-NFHE_IPTTSZ] = "Invalid ipt target size",
+ [-NFHE_IPTTCH] = "checkentry fails for ipt target",
+ [-NFHE_TOFF] = "Invalid target_offset",
+ [-NFHE_CHAINE] = "Empty chain name",
+ [-NFHE_CHAINL] = "Chain name too long",
+ [-NFHE_CT] = "Kernel does not have support for connection tracking, please recompile",
+ [-NFHE_CTHELP] = "Unable to load connection tracking helper module (nf_hipac_cthelp.o)",
+ [-NFHE_ILL] = "Illegal condition",
+ [-NFHE_IMPL] = "Feature not yet implemented"
+ };
+ static char buf[30];
+ int32_t n;
+
+ if (err <= NFHE_SYSOFF) {
+ return strerror(NFHE_TO_ERRNO(err));
+ }
+
+ n = -err;
+ if (n < 0 || n >= sizeof(errmsg) / sizeof(*errmsg) ||
+ errmsg[n] == NULL) {
+		snprintf(buf, sizeof(buf), "Unknown error %d\n", err);
+ return buf;
+ }
+ return errmsg[n];
+}
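Before the header file, a short aside on the pointer arithmetic used above: nlhp_new_cmd() returns NLMSG_DATA(msg), i.e. a pointer exactly NLMSG_LENGTH(0) bytes past the struct nlmsghdr it allocated, which is why nlhp_free_cmd() and nlhp_send_cmd() can get back to the netlink header (and the original calloc block) by subtracting NLMSG_LENGTH(0) again. A minimal sketch of that invariant, illustrative only and not part of the patch:

#include <assert.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	static struct nlmsghdr buf[4];		/* aligned scratch space */
	struct nlmsghdr *msg = &buf[0];
	void *payload = NLMSG_DATA(msg);	/* what nlhp_new_cmd() hands out */

	/* the payload sits NLMSG_LENGTH(0) bytes behind the header ... */
	assert((char *) payload - (char *) msg == NLMSG_LENGTH(0));
	/* ... so the header can always be recovered by subtraction */
	assert((struct nlmsghdr *) ((char *) payload - NLMSG_LENGTH(0)) == msg);
	return 0;
}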
diff -urN nf-hipac/user/libnfhipac.h nfhipac/user/libnfhipac.h
--- nf-hipac/user/libnfhipac.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/libnfhipac.h 2014-11-21 11:09:14.000000000 +0800
@@ -0,0 +1,90 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _LIBNFHIPAC_H
+#define _LIBNFHIPAC_H
+
+#include
+#if defined(__GLIBC__) && __GLIBC__ == 2
+# include
+# include
+# include
+#else /* libc5 */
+# include
+# include
+# include
+#endif
+#include "nfhp_com.h"
+
+/*
+  return an empty command struct s where only the command type is initialized
+  to cmd; the size of s, i.e. CMD_SIZE - (NLMSG_LENGTH(0) bytes), is
+  stored in *size if size is not NULL; on error NULL is returned;
+ IMPORTANT: __never__ call free, realloc or similar functions for s;
+ use nlhp_free_cmd instead to free the memory allocated for s
+ by nlhp_new_cmd;
+ realloc is never needed because the size of s is chosen to
+ be large enough (don't forget that the size of skb->data is
+ limited to 128 KB)
+*/
+struct nfhp_cmd *
+nlhp_new_cmd(int cmd, int *size);
+
+/* free cmd which must have been allocated by nlhp_new_cmd */
+void
+nlhp_free_cmd(struct nfhp_cmd *cmd);
+
+/* returns a copy of cmd; the size of cmd is passed via size */
+struct nfhp_cmd *
+nlhp_copy_cmd(struct nfhp_cmd *cmd, int size);
+
+/*
+ send command request to nf_hipac kernel module (necessary parts in cmd
+ must be filled in appropriately since cmd is not checked or modified by
+ the library); the size of cmd is passed via cmd_size; if cmd->cmd is
+ CMD_LIST then process_list must point to a callback function which
+ is called for each packet received during a list request;
+ otherwise process_list may be NULL; when called for the first time
+ process_list can assume that data points to a struct nfhp_list_chain;
+ process_list should return 0 on success and a value < 0 on error;
+ each packet may contain several struct nfhp_list_chain and struct
+ nfhp_list_rule arranged in the way described in nfhp_com.h;
+
+ if nlhp_send_cmd returns 0 then see nlhp_error(*err) for an error
+ description; *err == 0 indicates a successful operation;
+
+ return values: 0: see *err
+ -1: system error (see perror or errno)
+ -2: read timeout
+ -3: not enough memory available
+ -4: process_list failed
+ -5: other error
+*/
+int
+nlhp_send_cmd(struct nfhp_cmd *cmd, int cmd_size,
+ int (*process_list) (const void *data, int len), int *err);
+
+/*
+ * return a string description of err; if no specific message is available
+ * a generic "Unknown error" string is returned
+ */
+const char *
+nlhp_error(int err);
+
+#endif
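To show how the library is meant to be driven, here is a minimal usage sketch (illustrative only, not part of the patch; CMD_FLUSH and the command-specific fields are assumptions based on nfhp_com.h, which defines the command constants and struct nfhp_cmd):

#include <stdio.h>
#include "libnfhipac.h"

int main(void)
{
	int size, err, ret;
	struct nfhp_cmd *c;

	/* CMD_FLUSH is assumed to exist in nfhp_com.h (see the command
	   table in nf-hipac-core.c); any non-list command works alike */
	c = nlhp_new_cmd(CMD_FLUSH, &size);
	if (c == NULL)
		return 1;

	/* ... fill in the command-specific fields of *c here ... */

	ret = nlhp_send_cmd(c, sizeof(*c), NULL, &err);
	if (ret < 0)
		fprintf(stderr, "nf-hipac: send failed (%d)\n", ret);
	else if (err != 0)
		fprintf(stderr, "nf-hipac: %s\n", nlhp_error(err));

	nlhp_free_cmd(c);	/* never call free() on c directly */
	return (ret == 0 && err == 0) ? 0 : 1;
}

For CMD_LIST the call is the same except that a process_list callback must be passed; it is invoked once per reply packet with the payload layout described in nfhp_com.h.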
diff -urN nf-hipac/user/Makefile nfhipac/user/Makefile
--- nf-hipac/user/Makefile 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/Makefile 2014-11-21 12:46:19.000000000 +0800
@@ -0,0 +1,75 @@
+#
+# use make [target] [options]
+#
+# where options
+# - PREFIX: prefix for install
+# - IPT_LIB_DIR: location of iptables userspace modules
+# - IPTABLES_1_2: compile for iptables 1.2.x [true|false]
+#
+
+PREFIX := /usr/local
+IPT_LIB_DIR := /lib/iptables
+IPTABLES_1_2 := false
+
+DEBUG := false
+
+LIBDIR := $(PREFIX)/lib
+BINDIR := $(PREFIX)/sbin
+
+SRC := nf-hipac.c nf-hipac-core.c libnfhipac.c
+LIB := libnfhipac.so
+BIN := nf-hipac
+
+CC := gcc
+INSTALL := install
+LDCONFIG := ldconfig
+
+CFLAGS := -Wall -Wno-long-long -Wstrict-prototypes -Wno-trigraphs \
+ -Wunused -fno-strict-aliasing -fno-common -rdynamic \
+ -D IPT_LIB_DIR=\"$(IPT_LIB_DIR)\"
+
+ifeq ($(DEBUG), true)
+CFLAGS += -g
+BINFLAGS := $(CFLAGS) -Wl,-rpath,$(shell pwd)
+else
+CFLAGS += -O2 -fomit-frame-pointer
+BINFLAGS := $(CFLAGS)
+endif
+
+OFLAGS := $(CFLAGS) -fPIC
+SFLAGS := $(CFLAGS) -shared -nostartfiles
+
+
+.PHONY: install clean
+
+all: $(BIN)
+
+install: $(BIN)
+ mkdir -p "$(BINDIR)"
+ $(INSTALL) -o root -g root -m 755 $(BIN) $(BINDIR)
+ mkdir -p "$(LIBDIR)"
+ $(INSTALL) -o root -g root -m 755 $(LIB) $(LIBDIR)
+ $(LDCONFIG)
+
+nf-hipac: %:%.o -ldl $(LIB) nf-hipac-core.o
+ $(CC) $(BINFLAGS) $^ -o $@
+
+clean:
+ -@ rm -f $(SRC:.c=.d) $(SRC:.c=.o) $(BIN) $(LIB)
+
+%.so: %.o
+ $(CC) $(SFLAGS) $(filter %.o %.so, $^) -o $@
+
+%.o: %.c
+ $(CC) $(OFLAGS) -c $< -o $@
+
+%.d: %.c
+ @ set -e; $(CC) -MM $(CFLAGS) $< \
+ | sed 's/\($*\)\.o[ :]*/\1.o $@ : /g' > $@; \
+ [ -s $@ ] || rm -f $@
+
+ifneq ($(MAKECMDGOALS), clean)
+ifneq ($(MAKECMDGOALS), etags)
+-include $(SRC:.c=.d)
+endif
+endif
diff -urN nf-hipac/user/mode.h nfhipac/user/mode.h
--- nf-hipac/user/mode.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/mode.h 2014-11-21 11:17:40.000000000 +0800
@@ -0,0 +1,231 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * Licenced under the GNU General Public Licence, version 2.
+ */
+
+
+#ifndef _MODE_H
+#define _MODE_H
+
+#include <stddef.h>	// offsetof
+
+/* maximal number of bytes allocatable by mini_alloc */
+#define MINI_ALLOC_MAX 131072
+
+
+/*
+ * NEVER use big_alloc and big_free. Use hp_alloc and hp_free instead.
+ * The only exceptions to this rule is the implementation of hp_alloc,
+ * hp_realloc and hp_free.
+ *
+ * mini_alloc and mini_free can be used for small (<= MINI_ALLOC_MAX bytes)
+ * data structures if one wants to avoid the overhead of hp_alloc and hp_free
+ */
+#ifdef __KERNEL__
+#include
+#else
+#include
+#endif
+
+static inline unsigned
+big_alloc_size(unsigned size)
+{
+ return size == 0 ? 0 : (((size - 1) + PAGE_SIZE) & ~(PAGE_SIZE - 1));
+}
+
+
+#ifdef __KERNEL__
+/*
+ * Kernel space
+ */
+#include
+#include <linux/kernel.h>	// ULONG_MAX
+#include <linux/smp.h>		// smp_num_cpus, cpu_number_map, smp_processor_id
+#include <linux/rcupdate.h>	// Read Copy Update: synchronize_rcu
+#include <linux/cache.h>	// __cacheline_aligned
+#include <linux/netfilter.h>	// NF_ACCEPT, NF_DROP
+#include
+#include
+
+#define assert(as) do {} while (0)
+#define printf(str, args...) printk(str , ## args)
+
+static inline unsigned
+mini_alloc_size(unsigned size)
+{
+ unsigned int s;
+#define CACHE(x) if (size <= x) { s = x; goto found;}
+#include <linux/kmalloc_sizes.h>
+ return 0;
+found:
+ return s;
+}
+
+/* for small amounts of memory only (up to 128 KB) */
+static inline void *
+mini_alloc(unsigned size)
+{
+ return vmalloc(size);
+}
+
+static inline void
+mini_free(void *p)
+{
+ vfree(p);
+}
+
+/* memory is allocated in amounts of multiples of PAGE_SIZE */
+static inline void *
+big_alloc(unsigned size)
+{
+ return vmalloc(size);
+}
+
+static inline void
+big_free(void *p)
+{
+ vfree(p);
+}
+
+/* dirty hack to make stuff work with uml (otherwise high_physmem and end_vm
+ are not defined) */
+#ifdef CONFIG_UML_NET
+# undef TOP
+# ifdef CONFIG_HOST_2G_2G
+# define TOP 0x80000000
+# else
+# define TOP 0xc0000000
+# endif
+# undef SIZE
+# define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
+# undef START
+# define START (TOP - SIZE)
+# undef VMALLOC_OFFSET
+# define VMALLOC_OFFSET (8 * 1024 * 1024)
+# undef VMALLOC_START
+# define VMALLOC_START (((unsigned long) (START + 32 * 1024 * 1024) + \
+ VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1))
+static unsigned long high_physmem = START + 32 * 1024 * 1024;
+static unsigned long end_vm = VMALLOC_START + 32 * 1024 * 1024;
+#endif /* CONFIG_UML_NET */
+
+
+
+
+#else /* __KERNEL__ */
+/*
+ * User space
+ */
+#include
+#if defined(__GLIBC__) && __GLIBC__ == 2
+# include
+#else /* libc5 */
+# include
+#endif
+#include
+#include
+#include
+#include <limits.h>	// ULONG_MAX
+#include
+#include
+
+/* no assertions if not debugging */
+#ifndef DEBUG
+# undef assert
+# define assert(as) do {} while (0)
+#endif
+
+/* locking unnecessary in user space */
+#define synchronize_rcu(x) do {} while (0)
+
+/* printk compatibility */
+#define KERN_EMERG "KERN_EMERG: "
+#define KERN_ALERT "KERN_ALERT: "
+#define KERN_CRIT "KERN_CRIT: "
+#define KERN_ERR "KERN_ERR: "
+#define KERN_WARNING "KERN_WARNING: "
+#define KERN_NOTICE "KERN_NOTICE: "
+#define KERN_INFO "KERN_INFO: "
+#define KERN_DEBUG "KERN_DEBUG: "
+#define printk(str, args...) printf(str , ## args)
+
+/* netfilter verdict compatibility */
+#define NF_DROP 0
+#define NF_ACCEPT 1
+
+/* macro to annotate likely branch directions which results in the
+ blocks being reordered appropriately */
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+# define __builtin_expect(x, expected_value) (x)
+#endif
+#define likely(x) __builtin_expect((x), 1)
+#define unlikely(x) __builtin_expect((x), 0)
+
+static inline unsigned
+mini_alloc_size(unsigned size)
+{
+ unsigned int s;
+#define CACHE(x) if (size <= x) { s = x; goto found;}
+ CACHE(32);
+ CACHE(64);
+ CACHE(96);
+ CACHE(128);
+ CACHE(192);
+ CACHE(256);
+ CACHE(512);
+ CACHE(1024);
+ CACHE(2048);
+ CACHE(4096);
+ CACHE(8192);
+ CACHE(16384);
+ CACHE(32768);
+ CACHE(65536);
+ CACHE(131072);
+ return 0;
+found:
+ return s;
+}
+
+/* for small amounts of memory only (up to 128 KB) */
+static inline void *
+mini_alloc(unsigned size)
+{
+ return malloc(mini_alloc_size(size));
+}
+
+static inline void
+mini_free(void *p)
+{
+ free(p);
+}
+
+/* memory is allocated in amounts of multiples of PAGE_SIZE */
+static inline void *
+big_alloc(unsigned size)
+{
+ return malloc(big_alloc_size(size));
+}
+
+static inline void
+big_free(void *p)
+{
+ free(p);
+}
+
+#endif /* __KERNEL__ */
+
+#endif
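The user-space halves of big_alloc_size() and mini_alloc_size() are easy to sanity-check on their own. A small sketch, assuming the user-space include list provides PAGE_SIZE (4096 on x86) and that mode.h is compiled without __KERNEL__; illustrative only, not part of the patch:

#include <stdio.h>
#include "mode.h"

int main(void)
{
	/* big_alloc_size() rounds up to the next multiple of PAGE_SIZE */
	printf("%u %u\n", big_alloc_size(1), big_alloc_size(PAGE_SIZE + 1));
	/* prints: 4096 8192 (with PAGE_SIZE == 4096) */

	/* mini_alloc_size() rounds up to the next kmalloc-style size class
	   and returns 0 for anything above MINI_ALLOC_MAX (128 KB) */
	printf("%u %u\n", mini_alloc_size(100), mini_alloc_size(MINI_ALLOC_MAX + 1));
	/* prints: 128 0 */
	return 0;
}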
diff -urN nf-hipac/user/nf-hipac.c nfhipac/user/nf-hipac.c
--- nf-hipac/user/nf-hipac.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/nf-hipac.c 2014-11-21 12:48:12.000000000 +0800
@@ -0,0 +1,55 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * heavily based on iptables code from the netfilter project
+ * which is (C) by the netfilter coreteam
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+extern char *lib_dir;
+
+extern int
+do_command(int argc, char *argv[]);
+
+int
+main(int argc, char **argv)
+{
+ lib_dir = getenv(/*"IPTABLES_LIB_DIR"*/"XXX");
+ if (!lib_dir)
+ lib_dir = IPT_LIB_DIR;
+
+#ifdef NO_SHARED_LIBS
+ init_extensions();
+#endif
+ return do_command(argc, argv);
+}
diff -urN nf-hipac/user/nf-hipac-core.c nfhipac/user/nf-hipac-core.c
--- nf-hipac/user/nf-hipac-core.c 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/nf-hipac-core.c 2014-11-21 12:47:32.000000000 +0800
@@ -0,0 +1,3281 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB
+ * +-----------------------------+
+ * | Michael Bellion |
+ * | |
+ * +-----------------------------+
+ *
+ * (c) 2002-2003 hipac core team :
+ * +---------------------------+--------------------------+
+ * | Michael Bellion | Thomas Heinz |
+ * | | |
+ * +---------------------------+--------------------------+
+ *
+ * heavily based on iptables code from the netfilter project
+ * which is (C) by the netfilter coreteam
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#include
+#if defined(__GLIBC__) && __GLIBC__ == 2
+# include
+# include
+# include
+#else /* libc5 */
+# include
+# include
+# include
+#endif
+
+#include "nfhp_com.h"
+#include
+
+#include "libnfhipac.h"
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+static int listing_start = 0;
+
+#define OPT_NONE 0x00000U
+#define OPT_NUMERIC 0x00001U
+#define OPT_VERBOSE 0x00002U
+#define OPT_LINENUMBERS 0x00004U
+#define OPT_DEBUG 0x00008U
+#define OPT_DEBUGN 0x00010U
+#define OPT_JUMP 0x00020U
+
+#define NUMBER_OF_OPT 6
+
+static const char optflags[NUMBER_OF_OPT + NUMBER_OF_DIM][18]
+= {
+ [0] = "n",
+ [1] = "v",
+ [2] = "-line",
+ [3] = "-debug",
+ [4] = "-debugn",
+ [5] = "j",
+ [NUMBER_OF_OPT + DIMID_STATE] = "-state",
+ [NUMBER_OF_OPT + DIMID_SRC_IP ] = "s",
+ [NUMBER_OF_OPT + DIMID_DEST_IP ] = "d",
+ [NUMBER_OF_OPT + DIMID_INIFACE] = "i",
+ [NUMBER_OF_OPT + DIMID_OUTIFACE] = "o",
+ [NUMBER_OF_OPT + DIMID_PROTO] = "p",
+ [NUMBER_OF_OPT + DIMID_FRAGMENT] = "f",
+ [NUMBER_OF_OPT + DIMID_DPORT] = "-dport",
+ [NUMBER_OF_OPT + DIMID_SPORT] = "-sport",
+ [NUMBER_OF_OPT + DIMID_SYN] = "-syn or --not-syn",
+ [NUMBER_OF_OPT + DIMID_ICMP_TYPE] = "-icmp-type",
+ [NUMBER_OF_OPT + DIMID_TTL] = "-ttl",
+};
+
+
+
+#define HIPAC_NATIVE_MATCHES 5
+static const char native_matches[HIPAC_NATIVE_MATCHES][6]
+= { "tcp", "udp", "icmp", "state", "ttl"};
+
+
+static const char cmdflags[] = { ' ', 'A', 'I', 'D', 'D', 'R', 'F',
+ 'N', 'X', 'E', 'P', 'L'};
+
+
+
+static char commands_v_options[CMD_MAX + 1][2] = {
+ //type 0: -n, --line, --exact
+ //type 1: -s, -p, -sport, -dport, ...
+	//                  0    1
+
+/*NONE*/ {'x','x'},
+/*APPEND*/ {'x',' '},
+/*INSERT*/ {'x',' '},
+/*DELETE_RULE*/ {'x',' '},
+/*DELETE_POS*/ {'x','x'},
+/*REPLACE*/ {'x',' '},
+/*FLUSH*/ {'x','x'},
+/*NEW_CHAIN*/ {'x','x'},
+/*DELETE_CHAIN*/ {'x','x'},
+/*RENAME_CHAIN*/ {'x','x'},
+/*SET_POLICY*/ {'x','x'},
+/*LIST*/ {' ','x'},
+};
+
+
+static struct option original_opts[] = {
+ { "append", 1, 0, 'A' },
+ { "delete", 1, 0, 'D' },
+ { "insert", 1, 0, 'I' },
+ { "replace", 1, 0, 'R' },
+ { "list", 2, 0, 'L' },
+ { "flush", 2, 0, 'F' },
+ { "new-chain", 1, 0, 'N' },
+ { "delete-chain", 2, 0, 'X' },
+ { "rename-chain", 1, 0, 'E' },
+ { "policy", 1, 0, 'P' },
+ { "source", 1, 0, 's' },
+ { "destination", 1, 0, 'd' },
+ { "src", 1, 0, 's' }, /* synonym */
+ { "dst", 1, 0, 'd' }, /* synonym */
+ { "protocol", 1, 0, 'p' },
+ { "in-interface", 1, 0, 'i' },
+ { "jump", 1, 0, 'j' },
+ { "table", 1, 0, 't' },
+ { "match", 1, 0, 'm' },
+ { "numeric", 0, 0, 'n' },
+ { "out-interface", 1, 0, 'o' },
+ { "verbose", 0, 0, 'v' },
+ { "fragments", 0, 0, 'f' },
+ { "version", 0, 0, 'V' },
+ { "help", 2, 0, 'h' },
+ { "debug", 0, 0, 9 },
+ { "debugn", 0, 0, 10 },
+ { "line-numbers", 0, 0, 11 },
+ { "state", 1, 0, 12 },
+ { "ttl", 1, 0, 14 },
+ { "icmp-type", 1, 0, 15 },
+ { "source-port", 1, 0, 16 },
+ { "sport", 1, 0, 16 }, /* synonym */
+ { "destination-port", 1, 0, 17 },
+ { "dport", 1, 0, 17 }, /* synonym */
+ { "syn", 0, 0, 18 },
+ { "not-syn", 0, 0, 19 },
+ { 0 }
+};
+
+/* we need this for iptables-restore. iptables-restore.c sets line to the
+ * current line of the input file, in order to give a more precise error
+ * message. iptables itself doesn't need this, so it is initialized to the
+ * magic number of -1 */
+int line = -1;
+
+static struct option *opts = original_opts;
+static unsigned int global_option_offset = 0;
+static unsigned int options = 0;
+
+/* A few hardcoded protocols for 'all' and in case the user has no
+ /etc/protocols */
+struct pprot {
+ char *name;
+ u_int8_t num;
+};
+
+/* Primitive headers... */
+/* defined in netinet/in.h */
+#if 0
+#ifndef IPPROTO_ESP
+#define IPPROTO_ESP 50
+#endif
+#ifndef IPPROTO_AH
+#define IPPROTO_AH 51
+#endif
+#endif
+
+static const struct pprot chain_protos[] = {
+ { "tcp", IPPROTO_TCP },
+ { "udp", IPPROTO_UDP },
+ { "icmp", IPPROTO_ICMP },
+ { "esp", IPPROTO_ESP },
+ { "ah", IPPROTO_AH },
+ { "sctp", IPPROTO_SCTP },
+ { "all", 0 },
+};
+
+struct icmp_names {
+ const char *name;
+ u_int8_t type;
+ u_int8_t code_min, code_max;
+};
+
+static const struct icmp_names icmp_codes[] = {
+ { "echo-reply", 0, 0, 0xFF },
+ /* Alias */ { "pong", 0, 0, 0xFF },
+
+ { "destination-unreachable", 3, 0, 0xFF },
+ { "network-unreachable", 3, 0, 0 },
+ { "host-unreachable", 3, 1, 1 },
+ { "protocol-unreachable", 3, 2, 2 },
+ { "port-unreachable", 3, 3, 3 },
+ { "fragmentation-needed", 3, 4, 4 },
+ { "source-route-failed", 3, 5, 5 },
+ { "network-unknown", 3, 6, 6 },
+ { "host-unknown", 3, 7, 7 },
+ { "network-prohibited", 3, 9, 9 },
+ { "host-prohibited", 3, 10, 10 },
+ { "TOS-network-unreachable", 3, 11, 11 },
+ { "TOS-host-unreachable", 3, 12, 12 },
+ { "communication-prohibited", 3, 13, 13 },
+ { "host-precedence-violation", 3, 14, 14 },
+ { "precedence-cutoff", 3, 15, 15 },
+
+ { "source-quench", 4, 0, 0xFF },
+
+ { "redirect", 5, 0, 0xFF },
+ { "network-redirect", 5, 0, 0 },
+ { "host-redirect", 5, 1, 1 },
+ { "TOS-network-redirect", 5, 2, 2 },
+ { "TOS-host-redirect", 5, 3, 3 },
+
+ { "echo-request", 8, 0, 0xFF },
+ /* Alias */ { "ping", 8, 0, 0xFF },
+
+ { "router-advertisement", 9, 0, 0xFF },
+
+ { "router-solicitation", 10, 0, 0xFF },
+
+ { "time-exceeded", 11, 0, 0xFF },
+ /* Alias */ { "ttl-exceeded", 11, 0, 0xFF },
+ { "ttl-zero-during-transit", 11, 0, 0 },
+ { "ttl-zero-during-reassembly", 11, 1, 1 },
+
+ { "parameter-problem", 12, 0, 0xFF },
+ { "ip-header-bad", 12, 0, 0 },
+ { "required-option-missing", 12, 1, 1 },
+
+ { "timestamp-request", 13, 0, 0xFF },
+
+ { "timestamp-reply", 14, 0, 0xFF },
+
+ { "address-mask-request", 17, 0, 0xFF },
+
+ { "address-mask-reply", 18, 0, 0xFF }
+};
+
+struct dim_match
+{
+ u_int8_t invert;
+ u_int32_t left;
+ u_int32_t right;
+};
+
+#define OPTION_OFFSET 256
+
+#define FMT_NUMERIC 0x0001
+#define FMT_NOCOUNTS 0x0002
+#define FMT_KILOMEGAGIGA 0x0004
+#define FMT_OPTIONS 0x0008
+#define FMT_NOTABLE 0x0010
+#define FMT_NOTARGET 0x0020
+#define FMT_VIA 0x0040
+#define FMT_NONEWLINE 0x0080
+#define FMT_LINENUMBERS 0x0100
+
+#define FMT_PRINT_RULE (FMT_NOCOUNTS | FMT_OPTIONS | FMT_VIA \
+ | FMT_NUMERIC | FMT_NOTABLE)
+
+
+/*
+ * =============== iptables matches/targets compatibility ===============
+ */
+
+#ifndef PROC_SYS_MODPROBE
+#define PROC_SYS_MODPROBE "/proc/sys/kernel/modprobe"
+#endif
+
+
+/*
+ * libiptc.h declarations
+ */
+
+#ifndef IPT_MIN_ALIGN
+/* ipt_entry has pointers and u_int64_t's in it, so if you align to
+ it, you'll also align to any crazy matches and targets someone
+ might write */
+#define IPT_MIN_ALIGN (__alignof__(struct ipt_entry))
+#endif
+
+#define IPT_ALIGN(s) (((s) + ((IPT_MIN_ALIGN)-1)) & ~((IPT_MIN_ALIGN)-1))
+
+typedef char ipt_chainlabel[32];
+
+#define IPTC_LABEL_ACCEPT "ACCEPT"
+#define IPTC_LABEL_DROP "DROP"
+#define IPTC_LABEL_QUEUE "QUEUE"
+#define IPTC_LABEL_RETURN "RETURN"
+
+/* Transparent handle type. */
+typedef struct iptc_handle *iptc_handle_t;
+
+
+/*
+ * iptables.h declarations
+ */
+
+#ifndef IPT_LIB_DIR
+#define IPT_LIB_DIR "/usr/local/lib/iptables"
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef IPT_SO_GET_REVISION_MATCH /* Old kernel source. */
+#define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2)
+#define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3)
+
+struct ipt_get_revision
+{
+ char name[IPT_FUNCTION_MAXNAMELEN-1];
+
+ u_int8_t revision;
+};
+#endif /* IPT_SO_GET_REVISION_MATCH Old kernel source */
+
+struct iptables_rule_match
+{
+ struct iptables_rule_match *next;
+
+ struct iptables_match *match;
+};
+
+struct iptables_match
+{
+ struct iptables_match *next;
+
+ ipt_chainlabel name;
+
+ u_int8_t revision;
+
+ const char *version;
+
+ /* Size of match data. */
+ size_t size;
+
+	/* Size of match data relevant for userspace comparison purposes */
+ size_t userspacesize;
+
+ /* Function which prints out usage message. */
+ void (*help)(void);
+
+ /* Initialize the match. */
+ void (*init)(struct ipt_entry_match *m, unsigned int *nfcache);
+
+ /* Function which parses command options; returns true if it
+ ate an option */
+ int (*parse)(int c, char **argv, int invert, unsigned int *flags,
+ const struct ipt_entry *entry,
+ unsigned int *nfcache,
+ struct ipt_entry_match **match);
+
+ /* Final check; exit if not ok. */
+ void (*final_check)(unsigned int flags);
+
+ /* Prints out the match iff non-NULL: put space at end */
+ void (*print)(const struct ipt_ip *ip,
+ const struct ipt_entry_match *match, int numeric);
+
+ /* Saves the match info in parsable form to stdout. */
+ void (*save)(const struct ipt_ip *ip,
+ const struct ipt_entry_match *match);
+
+ /* Pointer to list of extra command-line options */
+ const struct option *extra_opts;
+
+ /* Ignore these men behind the curtain: */
+ unsigned int option_offset;
+ struct ipt_entry_match *m;
+ unsigned int mflags;
+#ifdef NO_SHARED_LIBS
+ unsigned int loaded; /* simulate loading so options are merged properly */
+#endif
+};
+
+struct iptables_target
+{
+ struct iptables_target *next;
+
+ ipt_chainlabel name;
+
+ u_int8_t revision;
+
+ const char *version;
+
+ /* Size of target data. */
+ size_t size;
+
+	/* Size of target data relevant for userspace comparison purposes */
+ size_t userspacesize;
+
+ /* Function which prints out usage message. */
+ void (*help)(void);
+
+ /* Initialize the target. */
+ void (*init)(struct ipt_entry_target *t, unsigned int *nfcache);
+
+ /* Function which parses command options; returns true if it
+ ate an option */
+ int (*parse)(int c, char **argv, int invert, unsigned int *flags,
+ const struct ipt_entry *entry,
+ struct ipt_entry_target **target);
+
+ /* Final check; exit if not ok. */
+ void (*final_check)(unsigned int flags);
+
+ /* Prints out the target iff non-NULL: put space at end */
+ void (*print)(const struct ipt_ip *ip,
+ const struct ipt_entry_target *target, int numeric);
+
+ /* Saves the targinfo in parsable form to stdout. */
+ void (*save)(const struct ipt_ip *ip,
+ const struct ipt_entry_target *target);
+
+ /* Pointer to list of extra command-line options */
+ struct option *extra_opts;
+
+ /* Ignore these men behind the curtain: */
+ unsigned int option_offset;
+ struct ipt_entry_target *t;
+ unsigned int tflags;
+ unsigned int used;
+#ifdef NO_SHARED_LIBS
+ unsigned int loaded; /* simulate loading so options are merged properly */
+#endif
+};
+
+
+/* Your shared library should call one of these. */
+void register_match(struct iptables_match *me);
+void register_target(struct iptables_target *me);
+
+struct in_addr *dotted_to_addr(const char *dotted);
+char *addr_to_dotted(const struct in_addr *addrp);
+char *addr_to_anyname(const struct in_addr *addr);
+char *mask_to_dotted(const struct in_addr *mask);
+
+void parse_hostnetworkmask(const char *name, struct in_addr **addrpp,
+ struct in_addr *maskp, unsigned int *naddrs);
+u_int16_t parse_protocol(const char *s);
+void parse_interface(const char *arg, char *vianame, unsigned char *mask);
+
+/* Keeping track of external matches and targets: linked lists. */
+struct iptables_match *iptables_matches = NULL;
+struct iptables_target *iptables_targets = NULL;
+
+enum ipt_tryload {
+ DONT_LOAD,
+ TRY_LOAD,
+ LOAD_MUST_SUCCEED
+};
+
+struct iptables_target *find_target(const char *name, enum ipt_tryload);
+struct iptables_match *find_match(const char *name, enum ipt_tryload,
+ struct iptables_rule_match **match);
+
+/* kernel revision handling */
+int kernel_version;
+void get_kernel_version(void);
+#define LINUX_VERSION(x,y,z) (0x10000*(x) + 0x100*(y) + z)
+#define LINUX_VERSION_MAJOR(x) (((x)>>16) & 0xFF)
+#define LINUX_VERSION_MINOR(x) (((x)>> 8) & 0xFF)
+#define LINUX_VERSION_PATCH(x) ( (x) & 0xFF)
+
+
+/*
+ * iptables_common.h declarations
+ */
+
+enum exittype {
+ OTHER_PROBLEM = 1,
+ PARAMETER_PROBLEM,
+ VERSION_PROBLEM
+};
+
+/* this is a special 64bit data type that is 8-byte aligned */
+#define aligned_u64 unsigned long long __attribute__((aligned(8)))
+
+void exit_printhelp(struct iptables_rule_match
+ *matches) __attribute__((noreturn));
+void exit_tryhelp(int) __attribute__((noreturn));
+int check_inverse(const char option[], int *invert, int *optind, int argc);
+int string_to_number(const char *,
+ unsigned int,
+ unsigned int,
+ unsigned int *);
+extern int string_to_number_l(const char *,
+ unsigned long int,
+ unsigned long int,
+ unsigned long *);
+extern int string_to_number_ll(const char *,
+ unsigned long long int,
+ unsigned long long int,
+ unsigned long long *);
+int iptables_insmod(const char *modname, const char *modprobe);
+void exit_error(enum exittype, char *, ...)__attribute__((noreturn,
+ format(printf,2,3)));
+
+const char *program_name = "nf-hipac";
+const char *program_version = "0.9.1";
+char *lib_dir;
+
+#ifdef NO_SHARED_LIBS
+# ifdef _INIT
+# define _init _INIT
+# endif
+ extern void init_extensions(void);
+#endif
+
+
+
+/*
+ * iptables_common.h implementations (only necessary functions)
+ */
+
+void
+exit_printhelp(struct iptables_rule_match *matches)
+{
+ struct iptables_rule_match *matchp = NULL;
+ struct iptables_target *t = NULL;
+
+ printf("\n%s v%s\n\n"
+"Usage: %s -[AD] chain rule-specification [options]\n"
+" %s -[RI] chain rulenum rule-specification [options]\n"
+" %s -D chain rulenum [options]\n"
+" %s -[LF] [chain] [options]\n"
+" %s -[NX] chain\n"
+" %s -E old-chain-name new-chain-name\n"
+" %s -P chain target [options]\n"
+" %s -h (print this help information)\n\n",
+ program_name, program_version, program_name, program_name,
+ program_name, program_name, program_name, program_name,
+ program_name, program_name);
+
+ printf(
+"Commands:\n"
+"Either long or short options are allowed.\n"
+" --append -A chain Append to chain\n"
+" --delete -D chain Delete matching rule from chain\n"
+" --delete -D chain rulenum\n"
+" Delete rule rulenum (1 = first) from chain\n"
+" --insert -I chain [rulenum]\n"
+" Insert in chain as rulenum (default 1 = first)\n"
+" --replace -R chain rulenum\n"
+" Replace rule rulenum (1 = first) in chain\n"
+" --list -L [chain] List the rules in a chain or all chains\n"
+" --flush -F [chain] Delete all rules in chain or all chains\n"
+" --new -N chain Create a new user-defined chain\n"
+" --delete-chain\n"
+" -X [chain] Delete a user-defined chain\n"
+" --policy -P chain target\n"
+" Change policy on chain to target\n"
+" --rename-chain\n"
+" -E old-chain new-chain\n"
+" Change chain name, (moving any references)\n"
+"----------\n"
+"Options:\n"
+" --proto -p [!] proto protocol: by number or name, eg. `tcp'\n"
+" --source -s [!] address[/mask] or\n"
+" address[:address]\n"
+" source(s) specification\n"
+" --destination -d [!] address[/mask] or\n"
+" address[:address]\n"
+" destination(s) specification\n"
+" --in-interface -i [!] devname\n"
+" network interface name\n"
+" --jump -j target\n"
+" target for rule \n"
+" --numeric -n numeric output of addresses and ports\n"
+" --out-interface -o [!] devname\n"
+" network interface name\n"
+" --verbose -v verbose mode\n"
+" --line-numbers print line numbers when listing\n"
+" [!] --fragment -f match second or further fragments only\n"
+" --version -V print package version.\n"
+"----------\n"
+"TCP options:\n"
+" --syn match when only SYN flag set\n"
+" --not-syn match when not only SYN flag set\n"
+" --source-port [!] port[:port]\n"
+" --sport ...\n"
+" match source port(s)\n"
+" --destination-port [!] port[:port]\n"
+" --dport ...\n"
+" match destination port(s)\n"
+"----------\n"
+"UDP options:\n"
+" --source-port [!] port[:port]\n"
+" --sport ...\n"
+" match source port(s)\n"
+" --destination-port [!] port[:port]\n"
+" --dport ...\n"
+" match destination port(s)\n"
+"----------\n"
+"ICMP options:\n"
+" --icmp-type typename match icmp type\n"
+" (or numeric type or type/code)\n"
+" see also: nf-hipac -h icmp\n"
+"----------\n"
+"STATE options:\n"
+" --state [!] [INVALID|ESTABLISHED|NEW|RELATED|UNTRACKED|ESTABLISHED,RELATED]\n"
+" State to match\n"
+"----------\n"
+"TTL options:\n"
+" --ttl value[:value] match time to live value(s)\n"
+"----------\n");
+
+
+ /* Print out any special helps. A user might like to be able
+ to add a --help to the commandline, and see expected
+ results. So we call help for all matches & targets */
+
+ if (iptables_targets) {
+ printf("\n-----------------\n"
+ "Iptables targets:\n"
+ "-----------------\n");
+ for (t = iptables_targets; t; t = t->next) {
+ if (t->used) {
+ printf("\n");
+ t->help();
+ }
+ }
+ }
+
+ if (matches) {
+ printf("\n-----------------\n"
+ "Iptables matches:\n"
+ "-----------------\n");
+ for (matchp = matches; matchp; matchp = matchp->next) {
+ printf("\n");
+ matchp->match->help();
+ }
+ }
+
+ exit(0);
+}
+
+
+int
+check_inverse(const char option[], int *invert, int *optind, int argc)
+{
+ if (option && strcmp(option, "!") == 0) {
+ if (*invert)
+ exit_error(PARAMETER_PROBLEM,
+ "Multiple `!' flags not allowed");
+ *invert = TRUE;
+ if (optind) {
+ *optind = *optind+1;
+ if (argc && *optind > argc)
+ exit_error(PARAMETER_PROBLEM,
+ "no argument following `!'");
+ }
+
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+int
+string_to_number_ll(const char *s, unsigned long long min,
+ unsigned long long max, unsigned long long *ret)
+{
+ unsigned long long number;
+ char *end;
+
+ /* Handle hex, octal, etc. */
+ errno = 0;
+ number = strtoull(s, &end, 0);
+ if (*end == '\0' && end != s) {
+ /* we parsed a number, let's see if we want this */
+ if (errno != ERANGE && min <= number &&
+ (!max || number <= max)) {
+ *ret = number;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+int
+string_to_number_l(const char *s, unsigned long min, unsigned long max,
+ unsigned long *ret)
+{
+ int result;
+ unsigned long long number;
+
+ result = string_to_number_ll(s, min, max, &number);
+ *ret = (unsigned long)number;
+
+ return result;
+}
+
+int
+string_to_number(const char *s, unsigned int min, unsigned int max,
+ unsigned int *ret)
+{
+ int result;
+ unsigned long number;
+
+ result = string_to_number_l(s, min, max, &number);
+ *ret = (unsigned int)number;
+
+ return result;
+}
+
+static char *get_modprobe(void)
+{
+ int procfile;
+ char *ret;
+
+#define PROCFILE_BUFSIZ 1024
+ procfile = open(PROC_SYS_MODPROBE, O_RDONLY);
+ if (procfile < 0)
+ return NULL;
+
+ ret = (char *) malloc(PROCFILE_BUFSIZ);
+ if (ret) {
+ memset(ret, 0, PROCFILE_BUFSIZ);
+ switch (read(procfile, ret, PROCFILE_BUFSIZ)) {
+ case -1: goto fail;
+		case PROCFILE_BUFSIZ: goto fail; /* Partial read. Weird */
+ }
+ if (ret[strlen(ret)-1]=='\n')
+ ret[strlen(ret)-1]=0;
+ close(procfile);
+ return ret;
+ }
+ fail:
+ free(ret);
+ close(procfile);
+ return NULL;
+}
+
+int iptables_insmod(const char *modname, const char *modprobe)
+{
+ char *buf = NULL;
+ char *argv[3];
+ int status;
+
+ /* If they don't explicitly set it, read out of kernel */
+ if (!modprobe) {
+ buf = get_modprobe();
+ if (!buf)
+ return -1;
+ modprobe = buf;
+ }
+
+ switch (fork()) {
+ case 0:
+ argv[0] = (char *)modprobe;
+ argv[1] = (char *)modname;
+ argv[2] = NULL;
+ execv(argv[0], argv);
+
+ /* not usually reached */
+ exit(1);
+ case -1:
+ return -1;
+
+ default: /* parent */
+ wait(&status);
+ }
+
+ free(buf);
+ if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
+ return 0;
+ return -1;
+}
+
+void clear_rule_matches(struct iptables_rule_match **matches)
+{
+ struct iptables_rule_match *matchp, *tmp;
+
+ for (matchp = *matches; matchp;) {
+ tmp = matchp->next;
+ free(matchp);
+ matchp = tmp;
+ }
+
+ *matches = NULL;
+}
+
+
+static void set_revision(char *name, u_int8_t revision)
+{
+ /* Old kernel sources don't have ".revision" field,
+ but we stole a byte from name. */
+ name[IPT_FUNCTION_MAXNAMELEN - 2] = '\0';
+ name[IPT_FUNCTION_MAXNAMELEN - 1] = revision;
+}
+
+
+static void free_opts(int reset_offset)
+{
+ if (opts != original_opts) {
+ free(opts);
+ opts = original_opts;
+ if (reset_offset)
+ global_option_offset = 0;
+ }
+}
+
+
+void
+exit_error(enum exittype status, char *msg, ...)
+{
+ va_list args;
+ va_start(args, msg);
+ fprintf(stderr, "%s v%s: ", program_name, program_version);
+ vfprintf(stderr, msg, args);
+ va_end(args);
+ fprintf(stderr, "\n");
+ if (status == PARAMETER_PROBLEM)
+ exit_tryhelp(status);
+ if (status == VERSION_PROBLEM)
+ fprintf(stderr,
+ "Perhaps nf-hipac or your "
+ "kernel needs to be upgraded.\n");
+ free_opts(1);
+ exit(status);
+}
+
+
+void
+exit_tryhelp(int status)
+{
+ if (line != -1)
+ fprintf(stderr, "Error occurred at line: %d\n", line);
+ fprintf(stderr, "Try `%s -h' or '%s --help' for more information.\n",
+ program_name, program_name );
+ free_opts(1);
+ exit(status);
+}
+
+
+/*
+ * iptables.h implementations (only necessary functions)
+ */
+
+static int compatible_revision(const char *name, u_int8_t revision, int opt)
+{
+ struct ipt_get_revision rev;
+ socklen_t s = sizeof(rev);
+ int max_rev, sockfd;
+
+ sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ if (sockfd < 0) {
+ fprintf(stderr, "Could not open socket to kernel: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+
+ strcpy(rev.name, name);
+ rev.revision = revision;
+
+ max_rev = getsockopt(sockfd, IPPROTO_IP, opt, &rev, &s);
+ if (max_rev < 0) {
+ /* Definitely don't support this? */
+ if (errno == EPROTONOSUPPORT) {
+ close(sockfd);
+ return 0;
+ } else if (errno == ENOPROTOOPT) {
+ close(sockfd);
+ /* Assume only revision 0 support (old kernel) */
+ return (revision == 0);
+ } else {
+ fprintf(stderr, "getsockopt failed strangely: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+ }
+ close(sockfd);
+ return 1;
+}
+
+static int compatible_match_revision(const char *name, u_int8_t revision)
+{
+ return compatible_revision(name, revision, IPT_SO_GET_REVISION_MATCH);
+}
+
+static int compatible_target_revision(const char *name, u_int8_t revision)
+{
+ return compatible_revision(name, revision, IPT_SO_GET_REVISION_TARGET);
+}
+
+void
+register_match(struct iptables_match *me)
+{
+ struct iptables_match **i;
+
+ /*if (strcmp(me->version, program_version) != 0) {
+ *fprintf(stderr, "%s: match `%s' v%s (I'm v%s).\n",
+ * program_name, me->name, me->version, program_version);
+ *exit(1);
+ *} */
+
+ struct iptables_match *old;
+
+ /* Revision field stole a char from name. */
+ if (strlen(me->name) >= IPT_FUNCTION_MAXNAMELEN-1) {
+ fprintf(stderr, "%s: target `%s' has invalid name\n",
+ program_name, me->name);
+ exit(1);
+ }
+
+ old = find_match(me->name, DONT_LOAD, NULL);
+ if (old) {
+ if (old->revision == me->revision) {
+ fprintf(stderr,
+ "%s: match `%s' already registered.\n",
+ program_name, me->name);
+ exit(1);
+ }
+
+ /* Now we have two (or more) options, check compatibility. */
+ if (compatible_match_revision(old->name, old->revision)
+ && old->revision > me->revision)
+ return;
+
+ /* Replace if compatible. */
+ if (!compatible_match_revision(me->name, me->revision))
+ return;
+
+ /* Delete old one. */
+ for (i = &iptables_matches; *i!=old; i = &(*i)->next);
+ *i = old->next;
+ }
+
+ if (me->size != IPT_ALIGN(me->size)) {
+ fprintf(stderr, "%s: match `%s' has invalid size %u.\n",
+ program_name, me->name, (unsigned int) me->size);
+ exit(1);
+ }
+
+ /* Append to list. */
+ for (i = &iptables_matches; *i; i = &(*i)->next);
+ me->next = NULL;
+ *i = me;
+
+ me->m = NULL;
+ me->mflags = 0;
+}
+
+void
+register_target(struct iptables_target *me)
+{
+ /*if (strcmp(me->version, program_version) != 0) {
+ *fprintf(stderr, "%s: target `%s' v%s (I'm v%s).\n",
+ * program_name, me->name, me->version, program_version);
+ *exit(1);
+ *} */
+
+ struct iptables_target *old;
+
+ /* Revision field stole a char from name. */
+ if (strlen(me->name) >= IPT_FUNCTION_MAXNAMELEN-1) {
+ fprintf(stderr, "%s: target `%s' has invalid name\n",
+ program_name, me->name);
+ exit(1);
+ }
+
+ old = find_target(me->name, DONT_LOAD);
+ if (old) {
+ struct iptables_target **i;
+
+ if (old->revision == me->revision) {
+ fprintf(stderr,
+ "%s: target `%s' already registered.\n",
+ program_name, me->name);
+ exit(1);
+ }
+
+ /* Now we have two (or more) options, check compatibility. */
+ if (compatible_target_revision(old->name, old->revision)
+ && old->revision > me->revision)
+ return;
+
+ /* Replace if compatible. */
+ if (!compatible_target_revision(me->name, me->revision))
+ return;
+
+ /* Delete old one. */
+ for (i = &iptables_targets; *i!=old; i = &(*i)->next);
+ *i = old->next;
+ }
+
+ if (me->size != IPT_ALIGN(me->size)) {
+ fprintf(stderr, "%s: target `%s' has invalid size %u.\n",
+ program_name, me->name, (unsigned int) me->size);
+ exit(1);
+ }
+
+ /* Prepend to list. */
+ me->next = iptables_targets;
+ iptables_targets = me;
+ me->t = NULL;
+ me->tflags = 0;
+}
+
+struct in_addr *
+dotted_to_addr(const char *dotted)
+{
+ static struct in_addr addr;
+ unsigned char *addrp;
+ char *p, *q;
+ unsigned int onebyte;
+ int i;
+ char buf[20];
+
+ /* copy dotted string, because we need to modify it */
+ strncpy(buf, dotted, sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = '\0';
+ addrp = (unsigned char *) &(addr.s_addr);
+
+ p = buf;
+ for (i = 0; i < 3; i++) {
+ if ((q = strchr(p, '.')) == NULL)
+ return (struct in_addr *) NULL;
+
+ *q = '\0';
+ if (string_to_number(p, 0, 255, &onebyte) == -1)
+ return (struct in_addr *) NULL;
+
+ addrp[i] = (unsigned char) onebyte;
+ p = q + 1;
+ }
+
+ /* we've checked 3 bytes, now we check the last one */
+ if (string_to_number(p, 0, 255, &onebyte) == -1)
+ return (struct in_addr *) NULL;
+
+ addrp[3] = (unsigned char) onebyte;
+
+ return &addr;
+}
+
+char *
+addr_to_dotted(const struct in_addr *addrp)
+{
+ static char buf[20];
+ const unsigned char *bytep;
+
+ bytep = (const unsigned char *) &(addrp->s_addr);
+ sprintf(buf, "%d.%d.%d.%d", bytep[0], bytep[1], bytep[2], bytep[3]);
+ return buf;
+}
+
+static char *
+addr_to_host(const struct in_addr *addr)
+{
+ struct hostent *host;
+
+ if ((host = gethostbyaddr((char *) addr,
+ sizeof(struct in_addr), AF_INET)) != NULL)
+ return (char *) host->h_name;
+
+ return (char *) NULL;
+}
+
+static char *
+addr_to_network(const struct in_addr *addr)
+{
+ struct netent *net;
+
+ if ((net = getnetbyaddr((long) ntohl(addr->s_addr), AF_INET)) != NULL)
+ return (char *) net->n_name;
+
+ return (char *) NULL;
+}
+
+char *
+addr_to_anyname(const struct in_addr *addr)
+{
+ char *name;
+
+ if ((name = addr_to_host(addr)) != NULL ||
+ (name = addr_to_network(addr)) != NULL)
+ return name;
+
+ return addr_to_dotted(addr);
+}
+
+char *
+mask_to_dotted(const struct in_addr *mask)
+{
+ int i;
+ static char buf[20];
+ u_int32_t maskaddr, bits;
+
+ maskaddr = ntohl(mask->s_addr);
+
+ if (maskaddr == 0xFFFFFFFFL)
+ /* we don't want to see "/32" */
+ return "";
+
+ i = 32;
+ bits = 0xFFFFFFFEL;
+ while (--i >= 0 && maskaddr != bits)
+ bits <<= 1;
+ if (i >= 0)
+ sprintf(buf, "/%d", i);
+ else
+ /* mask was not a decent combination of 1's and 0's */
+ sprintf(buf, "/%s", addr_to_dotted(mask));
+
+ return buf;
+}
+
+static struct in_addr *
+network_to_addr(const char *name)
+{
+ struct netent *net;
+ static struct in_addr addr;
+
+ if ((net = getnetbyname(name)) != NULL) {
+ if (net->n_addrtype != AF_INET)
+ return (struct in_addr *) NULL;
+ addr.s_addr = htonl((unsigned long) net->n_net);
+ return &addr;
+ }
+
+ return (struct in_addr *) NULL;
+}
+
+static void *
+fw_malloc(size_t size)
+{
+ void *p;
+
+ if ((p = malloc(size)) == NULL) {
+ perror("nf-hipac: malloc failed");
+ exit(1);
+ }
+ return p;
+}
+
+static void *
+fw_calloc(size_t count, size_t size)
+{
+ void *p;
+
+ if ((p = calloc(count, size)) == NULL) {
+ perror("nf-hipac: calloc failed");
+ exit(1);
+ }
+ return p;
+}
+
+static void
+inaddrcpy(struct in_addr *dst, struct in_addr *src)
+{
+ /* memcpy(dst, src, sizeof(struct in_addr)); */
+ dst->s_addr = src->s_addr;
+}
+
+static struct in_addr *
+host_to_addr(const char *name, unsigned int *naddr)
+{
+ struct hostent *host;
+ struct in_addr *addr;
+ unsigned int i;
+
+ *naddr = 0;
+ if ((host = gethostbyname(name)) != NULL) {
+ if (host->h_addrtype != AF_INET ||
+ host->h_length != sizeof(struct in_addr))
+ return (struct in_addr *) NULL;
+
+ while (host->h_addr_list[*naddr] != (char *) NULL)
+ (*naddr)++;
+		addr = fw_calloc(*naddr, sizeof(struct in_addr) * *naddr);
+		//FIXME: why the extra "* *naddr"? fw_calloc already multiplies by *naddr
+ for (i = 0; i < *naddr; i++)
+ inaddrcpy(&(addr[i]),
+ (struct in_addr *) host->h_addr_list[i]);
+ return addr;
+ }
+
+ return (struct in_addr *) NULL;
+}
+
+static struct in_addr *
+parse_hostnetwork(const char *name, unsigned int *naddrs)
+{
+ struct in_addr *addrp, *addrptmp;
+
+ if ((addrptmp = dotted_to_addr(name)) != NULL ||
+ (addrptmp = network_to_addr(name)) != NULL) {
+ addrp = fw_malloc(sizeof(struct in_addr));
+ inaddrcpy(addrp, addrptmp);
+ *naddrs = 1;
+ return addrp;
+ }
+ if ((addrp = host_to_addr(name, naddrs)) != NULL)
+ return addrp;
+
+ exit_error(PARAMETER_PROBLEM, "host/network `%s' not found", name);
+}
+
+static struct in_addr *
+parse_mask(char *mask)
+{
+ static struct in_addr maskaddr;
+ struct in_addr *addrp;
+ unsigned int bits;
+
+ if (mask == NULL) {
+ /* no mask at all defaults to 32 bits */
+ maskaddr.s_addr = 0xFFFFFFFF;
+ return &maskaddr;
+ }
+ if ((addrp = dotted_to_addr(mask)) != NULL)
+ /* dotted_to_addr already returns a network byte order addr */
+ return addrp;
+ if (string_to_number(mask, 0, 32, &bits) == -1)
+ exit_error(PARAMETER_PROBLEM,
+ "invalid mask `%s' specified", mask);
+ if (bits != 0) {
+ maskaddr.s_addr = htonl(0xFFFFFFFF << (32 - bits));
+ return &maskaddr;
+ }
+
+ maskaddr.s_addr = 0L;
+ return &maskaddr;
+}
+
+void
+parse_hostnetworkmask(const char *name, struct in_addr **addrpp,
+ struct in_addr *maskp, unsigned int *naddrs)
+{
+ struct in_addr *addrp;
+ char buf[256];
+ char *p;
+ int i, j, k, n;
+
+ strncpy(buf, name, sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = '\0';
+ if ((p = strrchr(buf, '/')) != NULL) {
+ *p = '\0';
+ addrp = parse_mask(p + 1);
+ } else
+ addrp = parse_mask(NULL);
+ inaddrcpy(maskp, addrp);
+
+ /* if a null mask is given, the name is ignored, like in "any/0" */
+ if (maskp->s_addr == 0L)
+ strcpy(buf, "0.0.0.0");
+
+ addrp = *addrpp = parse_hostnetwork(buf, naddrs);
+ n = *naddrs;
+ for (i = 0, j = 0; i < n; i++) {
+ addrp[j++].s_addr &= maskp->s_addr;
+ for (k = 0; k < j - 1; k++) {
+ if (addrp[k].s_addr == addrp[j - 1].s_addr) {
+ (*naddrs)--;
+ j--;
+ break;
+ }
+ }
+ }
+}
+
+u_int16_t
+parse_protocol(const char *s)
+{
+ unsigned int proto;
+
+ if (string_to_number(s, 0, 255, &proto) == -1) {
+ struct protoent *pent;
+
+ if ((pent = getprotobyname(s)))
+ proto = pent->p_proto;
+ else {
+ unsigned int i;
+ for (i = 0;
+ i < sizeof(chain_protos)/sizeof(struct pprot);
+ i++) {
+ if (strcmp(s, chain_protos[i].name) == 0) {
+ proto = chain_protos[i].num;
+ break;
+ }
+ }
+ if (i == sizeof(chain_protos)/sizeof(struct pprot))
+ exit_error(PARAMETER_PROBLEM,
+ "unknown protocol `%s' specified",
+ s);
+ }
+ }
+
+ return (u_int16_t)proto;
+}
+
+void parse_interface(const char *arg, char *vianame, unsigned char *mask)
+{
+ int vialen = strlen(arg);
+ unsigned int i;
+
+ memset(mask, 0, IFNAMSIZ);
+ memset(vianame, 0, IFNAMSIZ);
+
+ if (vialen + 1 > IFNAMSIZ)
+ exit_error(PARAMETER_PROBLEM,
+ "interface name `%s' must be shorter than IFNAMSIZ"
+ " (%i)", arg, IFNAMSIZ-1);
+
+ strcpy(vianame, arg);
+ if ((vialen == 0) || (vialen == 1 && vianame[0] == '+'))
+ memset(mask, 0, IFNAMSIZ);
+ else if (vianame[vialen - 1] == '+') {
+ memset(mask, 0xFF, vialen - 1);
+ memset(mask + vialen - 1, 0, IFNAMSIZ - vialen + 1);
+ /* Don't remove `+' here! -HW */
+ } else {
+ /* Include nul-terminator in match */
+ memset(mask, 0xFF, vialen + 1);
+ memset(mask + vialen + 1, 0, IFNAMSIZ - vialen - 1);
+ for (i = 0; vianame[i]; i++) {
+ if (!isalnum(vianame[i])
+ && vianame[i] != '_'
+ && vianame[i] != '.') {
+				printf("Warning: weird character in interface"
+ " `%s' (No aliases, :, ! or *).\n",
+ vianame);
+ break;
+ }
+ }
+ }
+}
+
+struct iptables_target *
+find_target(const char *name, enum ipt_tryload tryload)
+{
+ struct iptables_target *ptr;
+
+ /* Standard target? */
+ if (strcmp(name, "") == 0
+ || strcmp(name, IPTC_LABEL_ACCEPT) == 0
+ || strcmp(name, IPTC_LABEL_DROP) == 0
+ // not supported by nf-hipac
+ // || strcmp(name, IPTC_LABEL_QUEUE) == 0
+ || strcmp(name, IPTC_LABEL_RETURN) == 0)
+ name = "standard";
+
+ for (ptr = iptables_targets; ptr; ptr = ptr->next) {
+ if (strcmp(name, ptr->name) == 0)
+ break;
+ }
+
+#ifndef NO_SHARED_LIBS
+ if (!ptr && tryload != DONT_LOAD) {
+ char path[strlen(lib_dir) + sizeof("/libipt_.so")
+ + strlen(name)];
+ sprintf(path, "%s/libipt_%s.so", lib_dir, name);
+ if (dlopen(path, RTLD_NOW)) {
+ /* Found library. If it didn't register itself,
+ maybe they specified match as a target. */
+ ptr = find_target(name, DONT_LOAD);
+ if (!ptr)
+ exit_error(PARAMETER_PROBLEM,
+ "Couldn't load target `%s'\n",
+ name);
+ } else if (tryload == LOAD_MUST_SUCCEED)
+ exit_error(PARAMETER_PROBLEM,
+ "Couldn't load target `%s':%s\n",
+ name, dlerror());
+ }
+#else
+ if (ptr && !ptr->loaded) {
+ if (tryload != DONT_LOAD)
+ ptr->loaded = 1;
+ else
+ ptr = NULL;
+ }
+ if(!ptr && (tryload == LOAD_MUST_SUCCEED)) {
+ exit_error(PARAMETER_PROBLEM,
+ "Couldn't find target `%s'\n", name);
+ }
+#endif
+
+ if (ptr)
+ ptr->used = 1;
+
+ return ptr;
+}
+
+struct iptables_match *
+find_match(const char *name, enum ipt_tryload tryload,
+ struct iptables_rule_match **matches)
+{
+ struct iptables_match *ptr;
+
+ for (ptr = iptables_matches; ptr; ptr = ptr->next) {
+ if (strcmp(name, ptr->name) == 0)
+ break;
+ }
+
+#ifndef NO_SHARED_LIBS
+ if (!ptr && tryload != DONT_LOAD) {
+ char path[strlen(lib_dir) + sizeof("/libipt_.so")
+ + strlen(name)];
+ sprintf(path, "%s/libipt_%s.so", lib_dir, name);
+ if (dlopen(path, RTLD_NOW)) {
+ /* Found library. If it didn't register itself,
+ maybe they specified target as match. */
+ ptr = find_match(name, DONT_LOAD, NULL);
+
+ if (!ptr)
+ exit_error(PARAMETER_PROBLEM,
+ "Couldn't load match `%s'\n",
+ name);
+ } else if (tryload == LOAD_MUST_SUCCEED)
+ exit_error(PARAMETER_PROBLEM,
+ "Couldn't load match `%s':%s\n",
+ name, dlerror());
+ }
+#else
+ if (ptr && !ptr->loaded) {
+ if (tryload != DONT_LOAD)
+ ptr->loaded = 1;
+ else
+ ptr = NULL;
+ }
+ if(!ptr && (tryload == LOAD_MUST_SUCCEED)) {
+ exit_error(PARAMETER_PROBLEM,
+ "Couldn't find match `%s'\n", name);
+ }
+#endif
+
+ if (ptr && matches) {
+ struct iptables_rule_match **i;
+ struct iptables_rule_match *newentry;
+
+ newentry = fw_malloc(sizeof(struct iptables_rule_match));
+
+ for (i = matches; *i; i = &(*i)->next);
+ newentry->match = ptr;
+ newentry->next = NULL;
+ *i = newentry;
+ }
+
+ return ptr;
+}
+
+void
+get_kernel_version(void)
+{
+ static struct utsname uts;
+ int x = 0, y = 0, z = 0;
+
+ if (uname(&uts) == -1) {
+ fprintf(stderr, "Unable to retrieve kernel version.\n");
+ free_opts(1);
+ exit(1);
+ }
+
+ sscanf(uts.release, "%d.%d.%d", &x, &y, &z);
+ kernel_version = LINUX_VERSION(x, y, z);
+}
+
+
+/*
+ * ========== End of iptables matches/targets compatibility code ==========
+ */
+
+
+
+
+static char
+cmd2char(int option)
+{
+ const char *ptr;
+ for (ptr = cmdflags; option > 1; option >>= 1, ptr++);
+ return *ptr;
+}
+
+
+static void
+add_command(struct nfhp_cmd *message, int newcmd, int invert)
+{
+ if (invert)
+ exit_error(PARAMETER_PROBLEM, "unexpected ! flag");
+ if (message->cmd)
+ exit_error(PARAMETER_PROBLEM, "Can't use -%c with -%c\n",
+ cmd2char(newcmd), cmd2char(message->cmd));
+ message->cmd = newcmd;
+}
+
+static unsigned int
+dimid_to_option(unsigned int dimid)
+{
+ return (1 << (dimid + NUMBER_OF_OPT));
+}
+
+static void
+set_option(unsigned int *options, unsigned int std_opt, unsigned int dimid)
+{
+ unsigned int option;
+ if (std_opt)
+ option = std_opt;
+ else option = dimid_to_option(dimid);
+ if (*options & option)
+ exit_error(PARAMETER_PROBLEM,
+ "multiple flags of the same "
+ "type not allowed");
+ *options |= option;
+}
+
+
+
+void
+parse_rulenumber(const char *str, unsigned int *rulenum)
+{
+ if (string_to_number(str, 0, INT_MAX, rulenum) == -1)
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid rule number `%s'", str);
+}
+
+
+
+
+static struct option *
+merge_options(struct option *oldopts, const struct option *newopts,
+ unsigned int *option_offset)
+{
+ unsigned int num_old, num_new, i;
+ struct option *merge;
+
+ for (num_old = 0; oldopts[num_old].name; num_old++);
+ for (num_new = 0; newopts[num_new].name; num_new++);
+
+ global_option_offset += OPTION_OFFSET;
+ *option_offset = global_option_offset;
+
+ merge = malloc(sizeof(struct option) * (num_new + num_old + 1));
+ memcpy(merge, oldopts, num_old * sizeof(struct option));
+ free_opts(0);
+ for (i = 0; i < num_new; i++) {
+ merge[num_old + i] = newopts[i];
+ merge[num_old + i].val += *option_offset;
+ }
+ memset(merge + num_old + num_new, 0, sizeof(struct option));
+ return merge;
+}
+
+static void
+parse_target(const char *arg, char *target)
+{
+ const char *ptr;
+ memset(target, 0, HIPAC_CHAIN_NAME_MAX_LEN);
+
+ if (strlen(arg) < 1)
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid target name (too short)");
+
+ if (strlen(arg) + 1 > HIPAC_CHAIN_NAME_MAX_LEN)
+ exit_error(PARAMETER_PROBLEM,
+ "target name `%s' must be shorter than "
+ "HIPAC_CHAIN_NAME_MAX_LEN (%i)",
+ arg, HIPAC_CHAIN_NAME_MAX_LEN);
+ for (ptr = arg; *ptr; ptr++)
+ if (isspace(*ptr))
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid target name `%s'", arg);
+ strcpy(target, arg);
+}
+
+void
+parse_policy(const char *targetname, u_int8_t *policy)
+{
+ if (strlen(targetname) < 1)
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid target name (too short)");
+ if (strcasecmp(targetname, IPTC_LABEL_DROP) == 0){
+ *policy = TARGET_DROP;
+ return;
+ } else if (strcasecmp(targetname, IPTC_LABEL_ACCEPT) == 0){
+ *policy = TARGET_ACCEPT;
+ return;
+ }
+
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid target name `%s'", targetname);
+}
+
+void
+parse_native_protocol(const char *s, struct dim_match *rule)
+{
+ char buf[256];
+ char *p;
+
+ strncpy(buf, s, sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = '\0';
+ if ((p = strrchr(buf, ':')) != NULL) {
+ *p = '\0';
+ p++;
+ rule[DIMID_PROTO].left = buf[0] ? parse_protocol(buf) : 0;
+ if (strcasecmp(buf, p) == 0){
+ rule[DIMID_PROTO].right = rule[DIMID_PROTO].left;
+ } else {
+ rule[DIMID_PROTO].right = p[0] ? parse_protocol(p) : 255;
+ if (rule[DIMID_PROTO].left > rule[DIMID_PROTO].right)
+ exit_error(PARAMETER_PROBLEM,
+ "Range Error: start value > "
+ "end value\n");
+ if ((rule[DIMID_PROTO].left == 0) &&
+ (rule[DIMID_PROTO].right == 255)){
+ return;
+ }
+ }
+ } else {
+ rule[DIMID_PROTO].left =
+ rule[DIMID_PROTO].right = parse_protocol(s);
+ }
+ set_option(&options, 0, DIMID_PROTO);
+}
+
+
+static void
+parse_native_interface(const char *arg, char *vianame)
+{
+ int vialen = strlen(arg);
+ unsigned int i;
+
+ memset(vianame, 0, IFNAMSIZ);
+
+ if (vialen + 1 > IFNAMSIZ)
+ exit_error(PARAMETER_PROBLEM,
+ "interface name `%s' must be shorter than IFNAMSIZ"
+ " (%i)", arg, IFNAMSIZ-1);
+
+ strcpy(vianame, arg);
+ if (vialen == 0)
+ exit_error(PARAMETER_PROBLEM,
+ "no interface name specified");
+ else if (vianame[vialen - 1] == '+') {
+ exit_error(PARAMETER_PROBLEM,
+ "no + matches on interfaces supported");
+ } else {
+ /* Include nul-terminator in match */
+ for (i = 0; vianame[i]; i++) {
+ if (!isalnum(vianame[i])
+ && vianame[i] != '_'
+ && vianame[i] != '.') {
+				printf("Warning: weird character in interface"

+ " `%s' (No aliases, :, ! or *).\n",
+ vianame);
+ break;
+ }
+ }
+ }
+}
+
+static void
+parse_chain(const char *arg, char *vianame)
+{
+ int vialen = strlen(arg);
+
+ memset(vianame, 0, HIPAC_CHAIN_NAME_MAX_LEN);
+
+ if (vialen + 1 > HIPAC_CHAIN_NAME_MAX_LEN)
+ exit_error(PARAMETER_PROBLEM,
+ "chain name `%s' must be shorter than "
+ "HIPAC_CHAIN_NAME_MAX_LEN (%i)",
+ arg, HIPAC_CHAIN_NAME_MAX_LEN);
+
+ if (vialen == 0)
+ exit_error(PARAMETER_PROBLEM,
+ "no chain name specified");
+ strcpy(vianame, arg);
+}
+
+static int
+parse_state(const char *state, size_t strlen, unsigned int* sinfo)
+{
+ if (strncasecmp(state, "INVALID", strlen) == 0)
+ *sinfo = NFHP_STATE_INVALID;
+ else if (strncasecmp(state, "UNTRACKED", strlen) == 0)
+ *sinfo = NFHP_STATE_UNTRACKED;
+ else if (strncasecmp(state, "NEW", strlen) == 0)
+ *sinfo = NFHP_STATE_NEW;
+ else if (strncasecmp(state, "ESTABLISHED", strlen) == 0)
+ *sinfo = NFHP_STATE_ESTABLISHED;
+ else if (strncasecmp(state, "RELATED", strlen) == 0)
+ *sinfo = NFHP_STATE_RELATED;
+ else
+ return 0;
+ return 1;
+}
+
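+/* parse the --state argument: either a single state or the only pair
+ * forming a contiguous interval, ESTABLISHED,RELATED, is accepted and
+ * stored in rule[DIMID_STATE] */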
+static void
+parse_states(const char *arg, struct dim_match *rule)
+
+{
+ unsigned int left, right;
+ const char *comma;
+
+ if ((comma = strchr(arg, ',')) != NULL) {
+ if (comma == arg || !parse_state(arg, comma - arg, &left))
+ exit_error(PARAMETER_PROBLEM, "Bad state `%s'", arg);
+ if (strlen(comma + 1) == 0 ||
+ !parse_state(comma + 1, strlen(comma + 1), &right))
+ exit_error(PARAMETER_PROBLEM, "Bad state `%s'", arg);
+ if (left == right) {
+ rule[DIMID_STATE].left =
+ rule[DIMID_STATE].right = left;
+ } else if ((left == NFHP_STATE_ESTABLISHED &&
+ right == NFHP_STATE_RELATED) ||
+ (left == NFHP_STATE_RELATED &&
+ right == NFHP_STATE_ESTABLISHED)) {
+ rule[DIMID_STATE].left = NFHP_STATE_ESTABLISHED;
+ rule[DIMID_STATE].right = NFHP_STATE_RELATED;
+ } else {
+ exit_error(PARAMETER_PROBLEM, "Bad state `%s'", arg);
+ }
+ } else {
+ if (strlen(arg) == 0 ||
+ !parse_state(arg, strlen(arg), &left))
+ exit_error(PARAMETER_PROBLEM, "Bad state `%s'", arg);
+ rule[DIMID_STATE].left =
+ rule[DIMID_STATE].right = left;
+ }
+}
+
+
+
+static struct in_addr *
+hbo_parse_hostnetwork(const char *name, unsigned int *naddrs)
+{
+ struct in_addr *addrp, *addrptmp;
+ int i;
+
+ if ((addrptmp = dotted_to_addr(name)) != NULL ||
+ (addrptmp = network_to_addr(name)) != NULL) {
+ addrp = fw_malloc(sizeof(struct in_addr));
+ addrp->s_addr = ntohl(addrptmp->s_addr);
+ *naddrs = 1;
+ return addrp;
+ }
+ if ((addrp = host_to_addr(name, naddrs)) != NULL){
+ for (i = 0; i < *naddrs; i++)
+ addrp[i].s_addr = ntohl(addrp[i].s_addr);
+ return addrp;
+ }
+
+ exit_error(PARAMETER_PROBLEM, "host/network `%s' not found", name);
+ return (struct in_addr *) NULL;
+}
+
+static struct in_addr *
+hbo_parse_mask(char *mask)
+{
+ static struct in_addr maskaddr;
+ struct in_addr *addrp;
+ unsigned int bits;
+ int i;
+ u_int32_t f;
+
+ if ((addrp = dotted_to_addr(mask)) != NULL){
+ addrp->s_addr = ntohl(addrp->s_addr);
+ if (addrp->s_addr == 0xFFFFFFFFL){
+ return addrp;
+ }
+ i = 32;
+ f = 0xFFFFFFFEL;
+ while (--i >= 0 && addrp->s_addr != f)
+ f <<= 1;
+ if (i >= 0)
+ return addrp;
+ else
+ exit_error(PARAMETER_PROBLEM,
+ "invalid mask `%s' specified", mask);
+ }
+ if (string_to_number(mask, 0, 32, &bits) == -1)
+ exit_error(PARAMETER_PROBLEM,
+ "invalid mask `%s' specified", mask);
+ if (bits != 0) {
+ maskaddr.s_addr = (0xFFFFFFFF << (32 - bits));
+ return &maskaddr;
+ }
+
+ maskaddr.s_addr = 0L;
+ return &maskaddr;
+}
+
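+/* parse "addr/mask", "addr:addr" or a plain host/network name into a
+ * host byte order interval in rule[dimid]; an interval covering the
+ * whole address space is dropped again (*naddrs is set to 0) */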
+void
+hbo_parse_hostnetworkmask(const char *name, const int dimid,
+ struct dim_match *rule, struct in_addr **addrpp,
+ unsigned int *naddrs)
+{
+ struct in_addr *addrp;
+ struct in_addr addr, mask;
+ char buf[256];
+ char *p;
+
+
+ strncpy(buf, name, sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = '\0';
+ if ((p = strrchr(buf, '/')) != NULL) {
+ *p = '\0';
+ addrp = hbo_parse_mask(p + 1);
+ mask = *addrp;
+ if (mask.s_addr == 0L)
+ strcpy(buf, "0.0.0.0");
+ addrp = hbo_parse_hostnetwork(buf, naddrs);
+ addr = *addrp;
+ free(addrp);
+ *naddrs = 1;
+ rule[dimid].left = addr.s_addr & mask.s_addr;
+ rule[dimid].right = addr.s_addr | (~mask.s_addr);
+ if ((rule[dimid].left == 0) &&
+ (rule[dimid].right == 0xFFFFFFFF)){
+ *naddrs = 0;
+ return;
+ }
+ } else if ((p = strrchr(buf, ':')) != NULL) {
+ *p = '\0';
+ *naddrs = 1;
+ addrp = hbo_parse_hostnetwork(buf, naddrs);
+ addr = *addrp;
+ free(addrp);
+ if (strcasecmp(buf, p + 1) == 0){
+ mask = addr;
+ } else {
+ addrp = hbo_parse_hostnetwork(p + 1, naddrs);
+ mask = *addrp;
+ free(addrp);
+ if (addr.s_addr > mask.s_addr)
+ exit_error(PARAMETER_PROBLEM,
+ "Range Error: start value > "
+ "end value\n");
+ if ((addr.s_addr == 0) &&
+ (mask.s_addr == 0xFFFFFFFF)){
+ *naddrs = 0;
+ return;
+ }
+ }
+ rule[dimid].left = addr.s_addr;
+ rule[dimid].right = mask.s_addr;
+ } else {
+ addrp = hbo_parse_hostnetwork(buf, naddrs);
+ rule[dimid].left = addrp->s_addr;
+ rule[dimid].right = addrp->s_addr;
+ if (*naddrs > 1)
+ *addrpp = addrp;
+ else
+ free(addrp);
+ }
+ set_option(&options, 0, dimid);
+}
+
+static int
+service_to_port(const char *name)
+{
+ struct servent *service;
+
+ if ((service = getservbyname(name, NULL)) != NULL)
+ return ntohs((unsigned short) service->s_port);
+
+ return -1;
+}
+
+static int
+parse_port(const char *port)
+{
+ unsigned int portnum;
+
+ if (string_to_number(port, 0, 65535, &portnum) != -1 ||
+ (portnum = service_to_port(port)) != -1)
+ return (int )portnum;
+
+ exit_error(PARAMETER_PROBLEM,
+ "invalid port/service `%s' specified", port);
+ return 0;
+}
+
+static void
+parse_ports(const char *portstring, const int dimid, struct dim_match *rule)
+{
+ char *buffer;
+ char *cp;
+
+ buffer = strdup(portstring);
+ if ((cp = strchr(buffer, ':')) == NULL)
+ rule[dimid].left =
+ rule[dimid].right = parse_port(buffer);
+ else {
+ *cp = '\0';
+ cp++;
+
+ rule[dimid].left = buffer[0] ? parse_port(buffer) : 0;
+ rule[dimid].right = cp[0] ? parse_port(cp) : 0xFFFF;
+
+ if (rule[dimid].left > rule[dimid].right)
+ exit_error(PARAMETER_PROBLEM,
+ "invalid portrange (min > max)");
+ }
+ free(buffer);
+ if ((rule[dimid].left == 0) &&
+ (rule[dimid].right == 65535))
+ return;
+ set_option(&options, 0, dimid);
+}
+
+static int
+parse_ttl(const char *ttlstring)
+{
+ unsigned int ttl;
+
+ if (string_to_number(ttlstring, 0, 255, &ttl) != -1)
+ return (int )ttl;
+
+ exit_error(PARAMETER_PROBLEM,
+ "invalid TTL `%s' specified", ttlstring);
+ return 0;
+}
+
+static void
+parse_ttls(const char *ttlstring, struct dim_match *rule)
+{
+ char *buffer;
+ char *cp;
+
+ buffer = strdup(ttlstring);
+ if ((cp = strchr(buffer, ':')) == NULL)
+ rule[DIMID_TTL].left =
+ rule[DIMID_TTL].right = parse_ttl(buffer);
+ else {
+ *cp = '\0';
+ cp++;
+
+ rule[DIMID_TTL].left = buffer[0] ? parse_ttl(buffer) : 0;
+ rule[DIMID_TTL].right = cp[0] ? parse_ttl(cp) : 0xFF;
+
+ if (rule[DIMID_TTL].left > rule[DIMID_TTL].right)
+ exit_error(PARAMETER_PROBLEM,
+ "invalid TTL range (min > max)");
+ if ((rule[DIMID_TTL].left == 0) &&
+ (rule[DIMID_TTL].right == 0xFF)){
+ free(buffer);
+ return;
+ }
+ }
+ free(buffer);
+ set_option(&options, 0, DIMID_TTL);
+}
+
+static void
+parse_syn(const char *flag, struct dim_match *rule)
+{
+ if (strcmp(flag, "SYN") == 0) {
+ rule[DIMID_SYN].left = 0;
+ rule[DIMID_SYN].right = 0;
+ return;
+ } else if (strcmp(flag, "NOT-SYN") == 0){
+ rule[DIMID_SYN].left = 1;
+ rule[DIMID_SYN].right = 65535;
+ return;
+ } else
+ exit_error(PARAMETER_PROBLEM,
+ "Unknown TCP flag `%s'", flag);
+}
+
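+/* resolve a symbolic ICMP type name or a "type[/code]" pair into a
+ * (type << 8) + code interval in rule[DIMID_ICMP_TYPE] */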
+static void
+parse_icmp(const char *icmptype, struct dim_match *rule)
+{
+ unsigned int limit = sizeof(icmp_codes)/sizeof(struct icmp_names);
+ unsigned int match = limit;
+ unsigned int i;
+
+ for (i = 0; i < limit; i++) {
+ if (strncasecmp(icmp_codes[i].name, icmptype, strlen(icmptype))
+ == 0) {
+ if (match != limit)
+ exit_error(PARAMETER_PROBLEM,
+ "Ambiguous ICMP type `%s':"
+ " `%s' or `%s'?",
+ icmptype,
+ icmp_codes[match].name,
+ icmp_codes[i].name);
+ match = i;
+ }
+ }
+
+ if (match != limit) {
+ rule[DIMID_ICMP_TYPE].left = (icmp_codes[match].type << 8) +
+ icmp_codes[match].code_min;
+ rule[DIMID_ICMP_TYPE].right = (icmp_codes[match].type << 8) +
+ icmp_codes[match].code_max;
+ } else {
+ char *slash;
+ char buffer[strlen(icmptype) + 1];
+ unsigned int type, code;
+
+ strcpy(buffer, icmptype);
+ slash = strchr(buffer, '/');
+
+ if (slash)
+ *slash = '\0';
+
+ if (string_to_number(buffer, 0, 255, &type) == -1)
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid ICMP type `%s'\n", buffer);
+ if (slash) {
+ if (string_to_number(slash+1, 0, 255, &code) == -1)
+ exit_error(PARAMETER_PROBLEM,
+ "Invalid ICMP code `%s'\n",
+ slash+1);
+ rule[DIMID_ICMP_TYPE].left =
+ rule[DIMID_ICMP_TYPE].right =
+ (type << 8) + code;
+ } else {
+ rule[DIMID_ICMP_TYPE].left = type << 8;
+ rule[DIMID_ICMP_TYPE].right = (type << 8) + 0xFF;
+ }
+ }
+}
+
+static void
+generic_opt_check(int command, int options)
+{
+	int i, k;
+
+	for (i = 0; i < NUMBER_OF_OPT; i++) {
+		if (options & (1 << i)) {
+			/* ... option/command compatibility check ... */
+		}
+	}
+}
+
+static void
+fill_hipac_matches(struct nfhp_cmd *message, struct dim_match *rule)
+{
+	int i;
+
+	message->rule.r.native_mct = 0;
+	for (i = 0; i < NUMBER_OF_DIM; i++){
+		if (!(options & dimid_to_option(i)))
+			continue;
+		message->rule.m[message->rule.r.native_mct].dimid = i;
+ message->rule.m[message->rule.r.native_mct].invert =
+ rule[i].invert;
+ message->rule.m[message->rule.r.native_mct].left =
+ rule[i].left;
+ message->rule.m[message->rule.r.native_mct].right =
+ rule[i].right;
+ message->rule.r.native_mct++;
+ }
+}
+
+static char *
+port_to_service(int port)
+{
+ struct servent *service;
+
+ if ((service = getservbyport(htons(port), NULL)))
+ return service->s_name;
+
+ return NULL;
+}
+
+static void
+print_port(u_int16_t port, int numeric)
+{
+ char *service;
+
+ if (numeric || (service = port_to_service(port)) == NULL)
+ printf("%u", port);
+ else
+ printf("%s", service);
+}
+
+static void
+print_ports(const char *name, u_int32_t min, u_int32_t max, u_int8_t invert,
+ int numeric)
+{
+ printf("%s", name);
+ if (min == max) {
+ printf(": ");
+ if (invert)
+ printf("!");
+ print_port(min, numeric);
+ } else {
+
+ printf("s: ");
+ if (invert)
+ printf("!");
+ print_port(min, numeric);
+ printf(":");
+ print_port(max, numeric);
+ }
+ printf(" ");
+}
+
+static void
+print_ttls(u_int32_t min, u_int32_t max)
+{
+ printf("ttl");
+ if (min == max) {
+ printf(": ");
+ printf("%u", min);
+ } else {
+
+ printf("s: %u:%u", min, max);
+ }
+ printf(" ");
+}
+
+static void
+print_icmptype(u_int8_t type, u_int8_t code_min, u_int8_t code_max,
+ int numeric)
+{
+ printf("icmp: ");
+ if (!numeric) {
+ unsigned int i;
+
+ for (i = 0;
+ i < sizeof(icmp_codes)/sizeof(struct icmp_names);
+ i++) {
+ if (icmp_codes[i].type == type
+ && icmp_codes[i].code_min == code_min
+ && icmp_codes[i].code_max == code_max)
+ break;
+ }
+
+ if (i != sizeof(icmp_codes)/sizeof(struct icmp_names)) {
+ printf("%s ", icmp_codes[i].name);
+ return;
+ }
+ }
+
+ printf("type %u ", type);
+ if (code_min == 0 && code_max == 0xFF)
+ printf(" ");
+ else if (code_min == code_max)
+ printf("code %u ", code_min);
+ else
+ printf("codes %u-%u ", code_min, code_max);
+ printf(" ");
+}
+
+static void
+print_syn(u_int32_t min, u_int32_t max)
+{
+ printf("flags: ");
+	if (!min && !max) {
+		printf("syn");
+	} else if ((min == 1) && (max == 65535)){
+ printf("not-syn");
+ } else
+ printf("unknown");
+ printf(" ");
+}
+
+static void
+print_state(unsigned int left, unsigned int right,
+ u_int8_t invert)
+{
+ printf("state: ");
+ if (invert)
+ printf("!");
+ if (left != right)
+ printf("ESTABLISHED,RELATED");
+ else if (left == NFHP_STATE_INVALID)
+ printf("INVALID");
+ else if (left == NFHP_STATE_NEW)
+ printf("NEW");
+ else if (left == NFHP_STATE_RELATED)
+ printf("RELATED");
+ else if (left == NFHP_STATE_ESTABLISHED)
+ printf("ESTABLISHED");
+ else if (left == NFHP_STATE_UNTRACKED)
+ printf("UNTRACKED");
+ printf(" ");
+}
+
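+/* render a host byte order address interval: a single address, a CIDR
+ * prefix when the interval is mask aligned, otherwise "left:right" */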
+static char *
+print_ip(int dimid, int numeric, struct dim_match *rule)
+{
+ u_int32_t left = htonl(rule[dimid].left);
+ u_int32_t right = htonl(rule[dimid].right);
+ u_int32_t mask;
+ int i = 32;
+ u_int32_t f = 0xFFFFFFFEL;
+ static char buf[BUFSIZ];
+
+ if (left == right){
+ if (numeric)
+ sprintf(buf, "%s",
+ addr_to_dotted((struct in_addr *) &left));
+ else sprintf(buf, "%s",
+ addr_to_anyname((struct in_addr*) &left));
+ return buf;
+ }
+
+ mask = ~(rule[dimid].right - rule[dimid].left);
+ while (--i >= 0 && mask != f)
+ f <<= 1;
+
+ if (i >= 0 && (((rule[dimid].left) & f) == rule[dimid].left)){
+ if (numeric)
+ sprintf(buf, "%s/%d",
+ addr_to_dotted((struct in_addr *) &left), i);
+ else sprintf(buf, "%s/%d",
+ addr_to_anyname((struct in_addr*) &left), i);
+ return buf;
+ } else {
+ if (numeric){
+ sprintf(buf, "%s:",
+ addr_to_dotted((struct in_addr *) &left));
+ strcat(buf, addr_to_dotted((struct in_addr *) &right));
+ }else {
+ sprintf(buf, "%s:",
+ addr_to_anyname((struct in_addr*) &left));
+ strcat(buf, addr_to_anyname((struct in_addr*) &right));
+ }
+ return buf;
+ }
+}
+
+static void
+print_icmptypes(void)
+{
+ unsigned int i;
+ printf("Valid ICMP Types:");
+
+ for (i = 0; i < sizeof(icmp_codes)/sizeof(struct icmp_names); i++) {
+ if (i && icmp_codes[i].type == icmp_codes[i-1].type) {
+ if (icmp_codes[i].code_min == icmp_codes[i-1].code_min
+ && (icmp_codes[i].code_max
+ == icmp_codes[i-1].code_max))
+ printf(" (%s)", icmp_codes[i].name);
+ else
+ printf("\n %s", icmp_codes[i].name);
+ }
+ else
+ printf("\n%s", icmp_codes[i].name);
+ }
+ printf("\n");
+ exit(0);
+}
+
+static char *
+proto_to_name(u_int8_t proto)
+{
+ unsigned int i;
+
+ if (proto) {
+ struct protoent *pent = getprotobynumber(proto);
+ if (pent)
+ return pent->p_name;
+ }
+
+ for (i = 0; i < sizeof(chain_protos)/sizeof(struct pprot); i++)
+ if (chain_protos[i].num == proto)
+ return chain_protos[i].name;
+
+ return NULL;
+}
+
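+/* print one rule in iptables -L style; the native hipac matches are
+ * rebuilt into a dim_match array first, and --debug/--debugn append the
+ * raw dimension intervals at the end of the line */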
+static void
+print_rule(struct nfhp_list_rule* pt_rule, int is_nfhp_rule)
+{
+ int i;
+ unsigned int rule_options = 0;
+ struct dim_match rule[NUMBER_OF_DIM + 1];
+ struct iptables_target *target = NULL;
+ struct ipt_entry_target *t = NULL;
+
+ for (i = 0; i < pt_rule->r.native_mct; i++){
+ set_option(&rule_options, 0, pt_rule->r.first_match[i].dimid);
+ rule[pt_rule->r.first_match[i].dimid].invert =
+ pt_rule->r.first_match[i].invert;
+ rule[pt_rule->r.first_match[i].dimid].left =
+ pt_rule->r.first_match[i].left;
+ rule[pt_rule->r.first_match[i].dimid].right =
+ pt_rule->r.first_match[i].right;
+ }
+
+ if (options & OPT_LINENUMBERS)
+ printf("%-5u ", pt_rule->r.pos);
+
+ if (pt_rule->r.action == TARGET_ACCEPT){
+ printf("%-9s ", IPTC_LABEL_ACCEPT);
+ } else if (pt_rule->r.action == TARGET_DROP){
+ printf("%-9s ", IPTC_LABEL_DROP);
+ } else if (pt_rule->r.action == TARGET_RETURN){
+ printf("%-9s ", IPTC_LABEL_RETURN);
+ } else if (pt_rule->r.action == TARGET_NONE){
+ printf("%-9s ", " ");
+ } else if (pt_rule->r.action == TARGET_DUMMY){
+ printf("%-9s ", "DUMMY");
+ } else if (HAS_CHAIN_TARGET(&pt_rule->r)){
+ if (is_nfhp_rule){
+ printf("%-9s ",
+ (char *) ((struct nfhp_rule *) pt_rule)->e
+ + ((struct nfhp_rule *)
+ pt_rule)->e->target_offset);
+ } else {
+ printf("%-9s ", CHAIN_TARGET(&pt_rule->r));
+ }
+ } else if (HAS_IPT_TARGET(&pt_rule->r)){
+ } else {
+ printf("%-9s ", "UNKNOWN");
+ }
+
+ if (rule_options & dimid_to_option(DIMID_PROTO)){
+ char *pname = proto_to_name(rule[DIMID_PROTO].left);
+ printf("%c", rule[DIMID_PROTO].invert ? '!' : ' ');
+ if (rule[DIMID_PROTO].left != rule[DIMID_PROTO].right)
+ printf("%-1hu:%-2hu ", rule[DIMID_PROTO].left,
+ rule[DIMID_PROTO].right);
+ else if (pname)
+ printf("%-4s ", pname);
+ else
+ printf("%-4hu ", rule[DIMID_PROTO].left);
+ } else printf(" %-4s ", "all");
+
+ if (rule_options & dimid_to_option(DIMID_FRAGMENT)){
+ if (rule[DIMID_FRAGMENT].left == 0)
+ printf("%-3s ", "!f");
+ else printf("%-3s ", "-f");
+ } else printf("%-3s ", "--");
+
+ if (options & OPT_VERBOSE){
+ if (*pt_rule->indev){
+ if (rule_options & dimid_to_option(DIMID_INIFACE))
+ printf("%c", rule[DIMID_INIFACE].invert ?
+ '!' : ' ');
+ printf("%-6s ", pt_rule->indev);
+ } else {
+ if (options & (OPT_NUMERIC | OPT_DEBUGN))
+ printf(" %-6s ", "*");
+ else printf(" %-6s ", "any");
+ }
+ if (*pt_rule->outdev){
+ if (rule_options & dimid_to_option(DIMID_OUTIFACE))
+ printf("%c", rule[DIMID_OUTIFACE].invert ?
+ '!' : ' ');
+ printf("%-6s ", pt_rule->outdev);
+ } else {
+ if (options & (OPT_NUMERIC | OPT_DEBUGN))
+ printf(" %-6s ", "*");
+ else printf(" %-6s ", "any");
+ }
+ }
+
+ if (rule_options & dimid_to_option(DIMID_SRC_IP)){
+ printf("%c", rule[DIMID_SRC_IP].invert ? '!' : ' ');
+ printf("%-19s ", print_ip(DIMID_SRC_IP,
+ options &
+ (OPT_NUMERIC | OPT_DEBUGN), rule));
+ } else {
+ if (options & (OPT_NUMERIC | OPT_DEBUGN))
+ printf(" %-19s ", "0.0.0.0/0");
+ else printf(" %-19s ", "anywhere");
+ }
+
+ if (rule_options & dimid_to_option(DIMID_DEST_IP)){
+ printf("%c", rule[DIMID_DEST_IP].invert ? '!' : ' ');
+ printf("%-19s ", print_ip(DIMID_DEST_IP,
+ options &
+ (OPT_NUMERIC | OPT_DEBUGN), rule));
+ } else {
+ if (options & (OPT_NUMERIC | OPT_DEBUGN))
+ printf(" %-19s ", "0.0.0.0/0");
+ else printf(" %-19s ", "anywhere");
+ }
+
+ if (rule_options & dimid_to_option(DIMID_SPORT))
+ print_ports("spt", rule[DIMID_SPORT].left,
+ rule[DIMID_SPORT].right,
+ rule[DIMID_SPORT].invert,
+ options & (OPT_NUMERIC | OPT_DEBUGN));
+
+ if (rule_options & dimid_to_option(DIMID_DPORT))
+ print_ports("dpt", rule[DIMID_DPORT].left,
+ rule[DIMID_DPORT].right,
+ rule[DIMID_DPORT].invert,
+ options & (OPT_NUMERIC | OPT_DEBUGN));
+
+ if (rule_options & dimid_to_option(DIMID_SYN))
+ print_syn(rule[DIMID_SYN].left, rule[DIMID_SYN].right);
+
+ if (rule_options & dimid_to_option(DIMID_ICMP_TYPE))
+ print_icmptype(rule[DIMID_ICMP_TYPE].left >> 8,
+ rule[DIMID_ICMP_TYPE].left & 0xFF,
+ rule[DIMID_ICMP_TYPE].right & 0xFF,
+ options & (OPT_NUMERIC | OPT_DEBUGN));
+
+ if (rule_options & dimid_to_option(DIMID_STATE))
+ print_state(rule[DIMID_STATE].left, rule[DIMID_STATE].right,
+ rule[DIMID_STATE].invert);
+
+ if (rule_options & dimid_to_option(DIMID_TTL))
+ print_ttls(rule[DIMID_TTL].left, rule[DIMID_TTL].right);
+
+ if (target || HAS_IPT_MATCH(&pt_rule->r)){
+ struct ipt_ip ip;
+ memset(&ip, 0, sizeof(ip));
+ if (rule_options & dimid_to_option(DIMID_PROTO))
+ if (!rule[DIMID_PROTO].invert)
+ ip.proto = rule[DIMID_PROTO].left;
+ if (HAS_IPT_MATCH(&pt_rule->r)){
+ }
+ if (target) {
+ if (target->print)
+ target->print(&ip, t,
+ FMT_PRINT_RULE &
+ (FMT_NUMERIC &&
+ (options &
+ (OPT_NUMERIC | OPT_DEBUGN))));
+ } else if (t && t->u.target_size != sizeof(*t))
+ printf("[%u bytes of unknown target data] ",
+ (unsigned int)(t->u.target_size - sizeof(*t)));
+ }
+
+ if (options & OPT_DEBUGN || options & OPT_DEBUG) {
+ printf("||");
+ for (i = 0; i < NUMBER_OF_DIM; i++) {
+ if (rule_options & dimid_to_option(i)) {
+ printf(" Dim%d:%s%u", i, rule[i].invert ?
+ "!" : "", rule[i].left);
+ if (rule[i].left != rule[i].right)
+ printf("-%u", rule[i].right);
+ }
+ }
+ }
+
+ printf("\n");
+
+
+}
+
+static void
+print_header(const char *chain, int policy)
+{
+ printf("Chain %s ", chain);
+ if (policy == TARGET_ACCEPT)
+ printf("(policy %s)\n", IPTC_LABEL_ACCEPT);
+ else if (policy == TARGET_DROP)
+ printf("(policy %s)\n", IPTC_LABEL_DROP);
+ else printf("(unknown policy)\n");
+
+ if (options & OPT_LINENUMBERS)
+ printf("%-5s ", "num");
+
+ printf("%-9s ", "target");
+ printf(" %-4s ", "prot");
+ printf("%-3s ", "opt");
+ if (options & OPT_VERBOSE) {
+ printf(" %-6s ", "in");
+ printf(" %-6s ", "out");
+ }
+ printf(" %-19s ", "source");
+ printf(" %-19s ", "destination");
+ printf("\n");
+}
+
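+/* netlink callback for -L: walks the reply payload chain by chain,
+ * printing a header per chain followed by its rules */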
+static int
+process_list(const void *data, int len)
+{
+ static u_int32_t rulenum = 0;
+ static u_int32_t chain_rulenum = 0;
+ const struct nfhp_list_chain *current_chain;
+ const void *p = data;
+
+ if (listing_start) {
+ current_chain = (struct nfhp_list_chain *) p;
+ rulenum = 0;
+ chain_rulenum = current_chain->rule_num;
+ listing_start = 0;
+ p += sizeof(struct nfhp_list_chain);
+ print_header(current_chain->label,
+ current_chain->policy);
+ }
+ while (p < data + len) {
+ if (rulenum < chain_rulenum) {
+ int rule_size =
+ IPT_ALIGN(((struct nfhp_list_rule *) p)->r.size
+ + offsetof(struct nfhp_list_rule,
+ r));
+ if (len - (p - data) < rule_size) {
+ fprintf(stderr, "%s: incomplete rule\n",
+ __FUNCTION__);
+ return -1;
+ }
+ print_rule((struct nfhp_list_rule *) p, 0);
+ p += rule_size;
+ rulenum++;
+ } else {
+ if (len - (p - data) <
+ sizeof(struct nfhp_list_chain)) {
+ fprintf(stderr, "%s: incomplete chain\n",
+ __FUNCTION__);
+ return -1;
+ }
+ printf("\n");
+ current_chain = (struct nfhp_list_chain *) p;
+ rulenum = 0;
+ chain_rulenum = current_chain->rule_num;
+ p += sizeof(struct nfhp_list_chain);
+ print_header(current_chain->label,
+ current_chain->policy);
+
+ }
+ }
+ return 0;
+}
+
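+/* translate the nlhp_send_cmd() result: ret == 0 with err != 0 is a
+ * kernel-side error code, negative ret values are local failures */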
+static int
+handle_error(int ret, int err)
+{
+ if (ret == 0 && err == 0) {
+ return 0;
+ }
+
+ switch (ret) {
+ case 0:
+ fprintf(stderr, "%s\n", nlhp_error(err));
+ break;
+ case -1:
+ perror("");
+ break;
+ case -2:
+		fprintf(stderr, "Read timeout occurred\n");
+ break;
+ case -3:
+ fprintf(stderr, "Not enough memory available\n");
+ break;
+ case -4:
+ fprintf(stderr, "List handler failed\n");
+ break;
+ }
+ return ret;
+}
+
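+/* parse an iptables-like command line into a struct nfhp_cmd and either
+ * send it via netlink or, with --debug/--debugn, only print the resulting
+ * rule(s); hostnames resolving to several addresses are expanded into one
+ * rule per source/destination pair */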
+int
+do_command(int argc, char *argv[])
+{
+ int c, invert = 0, ipt_match_num = 0, cmd_return, ret = 1, err;
+ struct iptables_target *ipt_target = NULL;
+ u_int32_t ipt_matches_size = 0;
+ char *protocol = NULL;
+ char jumpto[HIPAC_CHAIN_NAME_MAX_LEN] = "";
+ struct nfhp_cmd* message = NULL;
+ struct dim_match rule[NUMBER_OF_DIM];
+ struct in_addr *s_ips = NULL, *d_ips = NULL;
+ unsigned int ns_addrs = 0, nd_addrs = 0;
+ struct iptables_target *t;
+ struct iptables_match *m;
+ struct iptables_rule_match *matches = NULL;
+ struct iptables_rule_match *matchp;
+
+ /* re-set optind to 0 in case do_command gets called
+ * a second time */
+ optind = 0;
+
+ /* clear mflags in case do_command gets called a second time
+ * (we clear the global list of all matches for security)*/
+ for (m = iptables_matches; m; m = m->next) {
+ m->mflags = 0;
+ }
+ for (t = iptables_targets; t; t = t->next) {
+ t->tflags = 0;
+ t->used = 0;
+ }
+ memset(rule, 0, sizeof(struct dim_match) * NUMBER_OF_DIM);
+ message = nlhp_new_cmd(CMD_NONE, NULL);
+	if (!message) {
+		perror("nf-hipac: low memory");
+		exit(1);
+	}
+
+ message->rule.r.action = TARGET_NONE;
+
+ /* Suppress error messages: we may add new options if we
+ demand-load a protocol. */
+ opterr = 0;
+ while ((c = getopt_long (argc, argv,
+ "-A:D:I:R:L::F::Z::N:X::E:P:t:m:s:d:p:i:j:c:o:fvnxVh::",
+ opts, NULL)) != -1){
+ switch(c){
+ case 'A':
+ add_command(message, CMD_APPEND, invert);
+ parse_chain(optarg, message->chain.label);
+ break;
+ case 'D':
+ if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!') {
+ add_command(message, CMD_DELETE_POS, invert);
+ parse_chain(optarg, message->chain.label);
+ parse_rulenumber(argv[optind++],
+ &message->rule.r.pos);
+ } else {
+ add_command(message, CMD_DELETE_RULE, invert);
+ parse_chain(optarg, message->chain.label);
+ }
+ break;
+ case 'R':
+ add_command(message, CMD_REPLACE, invert);
+ parse_chain(optarg, message->chain.label);
+ if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!')
+ parse_rulenumber(argv[optind++],
+ &message->rule.r.pos);
+ else
+ exit_error(PARAMETER_PROBLEM,
+ "-%c requires a rule number",
+ cmd2char(CMD_REPLACE));
+ break;
+ case 'I':
+ add_command(message, CMD_INSERT, invert);
+ parse_chain(optarg, message->chain.label);
+ if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!')
+ parse_rulenumber(argv[optind++],
+ &message->rule.r.pos);
+ else message->rule.r.pos = 1;
+ break;
+ case 'L':
+ add_command(message, CMD_LIST, invert);
+ if (optarg) parse_chain(optarg, message->chain.label);
+ else if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!')
+ parse_chain(argv[optind++],
+ message->chain.label);
+ break;
+ case 'F':
+ add_command(message, CMD_FLUSH, invert);
+ if (optarg) parse_chain(optarg, message->chain.label);
+ else if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!')
+ parse_chain(argv[optind++],
+ message->chain.label);
+ break;
+ case 'N':
+ add_command(message, CMD_NEW_CHAIN, invert);
+ if (optarg && (*optarg == '-' || *optarg == '!'))
+ exit_error(PARAMETER_PROBLEM,
+ "chain name not allowed to start "
+ "with `%c'\n", *optarg);
+ parse_chain(optarg, message->chain.label);
+ if (strcmp(optarg, IPTC_LABEL_ACCEPT) == 0
+ || strcmp(optarg, IPTC_LABEL_DROP) == 0
+ || strcmp(optarg, IPTC_LABEL_QUEUE) == 0
+ || strcmp(optarg, IPTC_LABEL_RETURN) == 0
+ || find_target(optarg, TRY_LOAD))
+ exit_error(PARAMETER_PROBLEM,
+ "chain name may not clash "
+ "with target name\n");
+ break;
+ case 'X':
+ add_command(message, CMD_DELETE_CHAIN, invert);
+ if (optarg) parse_chain(optarg, message->chain.label);
+ else if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!')
+ parse_chain(argv[optind++],
+ message->chain.label);
+ break;
+ case 'E':
+ add_command(message, CMD_RENAME_CHAIN, invert);
+ parse_chain(optarg, message->chain.label);
+ if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!'){
+ parse_chain(argv[optind++],
+ message->chain.newlabel);
+ if (strcmp(message->chain.newlabel,
+ IPTC_LABEL_ACCEPT) == 0
+ || strcmp(message->chain.newlabel,
+ IPTC_LABEL_DROP) == 0
+ || strcmp(message->chain.newlabel,
+ IPTC_LABEL_QUEUE) == 0
+ || strcmp(message->chain.newlabel,
+ IPTC_LABEL_RETURN) == 0
+ || find_target(message->chain.newlabel,
+ TRY_LOAD))
+ exit_error(PARAMETER_PROBLEM,
+ "chain name may not clash "
+ "with target name\n");
+ } else
+ exit_error(PARAMETER_PROBLEM,
+ "-%c requires old-chain-name and "
+ "new-chain-name",
+ cmd2char(CMD_RENAME_CHAIN));
+
+ break;
+ case 'P':
+ add_command(message, CMD_SET_POLICY, invert);
+ parse_chain(optarg, message->chain.label);
+ if (optind < argc && argv[optind][0] != '-'
+ && argv[optind][0] != '!')
+ parse_policy(argv[optind++],
+ &message->chain.policy);
+ else
+ exit_error(PARAMETER_PROBLEM,
+ "-%c requires a chain and a policy",
+ cmd2char(CMD_SET_POLICY));
+ break;
+ case 'h':
+ if (!optarg)
+ optarg = argv[optind];
+ if ((optarg ) && (strcasecmp(optarg, "icmp") == 0))
+ print_icmptypes();
+ exit_printhelp(matches);
+
+ /* Options */
+ case 's':
+ check_inverse(optarg, &invert, &optind, argc);
+ hbo_parse_hostnetworkmask(argv[optind-1],
+ DIMID_SRC_IP, rule,
+ &s_ips, &ns_addrs);
+ rule[DIMID_SRC_IP].invert = invert;
+ break;
+
+ case 'd':
+ check_inverse(optarg, &invert, &optind, argc);
+ hbo_parse_hostnetworkmask(argv[optind-1],
+ DIMID_DEST_IP, rule,
+ &d_ips, &nd_addrs);
+ rule[DIMID_DEST_IP].invert = invert;
+ break;
+ case 'p':
+ check_inverse(optarg, &invert, &optind, argc);
+ for (protocol = argv[optind-1]; *protocol; protocol++)
+ *protocol = tolower(*protocol);
+ parse_native_protocol(argv[optind-1], rule);
+ rule[DIMID_PROTO].invert = invert;
+ if (!invert &&
+ (rule[DIMID_PROTO].left ==
+ rule[DIMID_PROTO].right)){
+ message->rule.e->ip.proto =
+ rule[DIMID_PROTO].left;
+ }
+ break;
+ case 'f':
+ set_option(&options, 0, DIMID_FRAGMENT);
+ if (invert){
+ rule[DIMID_FRAGMENT].left =
+ rule[DIMID_FRAGMENT].right = 0;
+ } else {
+ rule[DIMID_FRAGMENT].left = 1;
+ rule[DIMID_FRAGMENT].right = 65535;
+ }
+ break;
+ case 'j':
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "cannot have ! before -%s",
+ optflags[OPT_VERBOSE]);
+ set_option(&options, OPT_JUMP, 0);
+ parse_target(optarg, jumpto);
+ if (strcmp(optarg, IPTC_LABEL_ACCEPT) == 0){
+ message->rule.r.action = TARGET_ACCEPT;
+ break;
+ } else if (strcmp(optarg, IPTC_LABEL_DROP) == 0){
+ message->rule.r.action = TARGET_DROP;
+ break;
+ } else if (strcmp(optarg, IPTC_LABEL_RETURN) == 0){
+ message->rule.r.action = TARGET_RETURN;
+ break;
+ }
+ ipt_target = find_target(jumpto, TRY_LOAD);
+ if (ipt_target) {
+ size_t size;
+ size = IPT_ALIGN(sizeof(struct ipt_entry_target))
+ + ipt_target->size;
+
+ ipt_target->t = fw_calloc(1, size);
+ ipt_target->t->u.target_size = size;
+ strcpy(ipt_target->t->u.user.name, jumpto);
+ set_revision(ipt_target->t->u.user.name,
+ ipt_target->revision);
+ if (ipt_target->init != NULL)
+ ipt_target->init(ipt_target->t,
+ &message->rule.e
+ ->nfcache);
+ opts = merge_options(opts,
+ ipt_target->extra_opts,
+ &ipt_target->option_offset);
+ message->rule.r.action = TARGET_EXEC;
+ } else {
+ message->rule.r.action = TARGET_CHAIN;
+ }
+ break;
+ case 'i':
+ check_inverse(optarg, &invert, &optind, argc);
+ set_option(&options, 0, DIMID_INIFACE);
+ parse_native_interface(argv[optind-1],
+ message->rule.indev);
+ rule[DIMID_INIFACE].invert = invert;
+ break;
+ case 'o':
+ check_inverse(optarg, &invert, &optind, argc);
+ set_option(&options, 0, DIMID_OUTIFACE);
+ parse_native_interface(argv[optind-1],
+ message->rule.outdev);
+ rule[DIMID_OUTIFACE].invert = invert;
+ break;
+ case 'v':
+ set_option(&options, OPT_VERBOSE, 0);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "cannot have ! before -%s",
+ optflags[OPT_VERBOSE]);
+ break;
+ case 'V':
+ printf("%s v%s\n",
+ program_name, program_version);
+ exit(0);
+ case 'n':
+ set_option(&options, OPT_NUMERIC, 0);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "cannot have ! before -%s",
+ optflags[OPT_NUMERIC]);
+ break;
+ case 't':
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "unexpected ! flag before --table");
+ if (strcmp(argv[optind-1], "filter") != 0)
+ exit_error(PARAMETER_PROBLEM,
+ "no other table than "
+ "filter supported");
+ break;
+ case 'm':
+ {
+ int i, found = 0;
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "unexpected ! flag before --match");
+ for (i = 0; i < HIPAC_NATIVE_MATCHES; i++){
+ if (strcmp(optarg, native_matches[i]) == 0){
+ found = 1;
+ break;
+ }
+ }
+			// no longer load iptables match modules
+ }
+ break;
+ case 9:
+ // --debug matched
+ set_option(&options, OPT_DEBUG, 0);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "cannot have ! before -%s",
+ optflags[OPT_DEBUG]);
+ break;
+ case 10:
+ // --debugn matched
+ set_option(&options, OPT_DEBUGN, 0);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "cannot have ! before -%s",
+ optflags[OPT_DEBUGN]);
+ break;
+
+ case 11:
+ // --line matched
+ set_option(&options, OPT_LINENUMBERS, 0);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "cannot have ! before -%s",
+ optflags[OPT_LINENUMBERS]);
+ break;
+ case 12:
+ // --state matched
+ check_inverse(optarg, &invert, &optind, argc);
+ set_option(&options, 0, DIMID_STATE);
+ parse_states(argv[optind-1], rule);
+ rule[DIMID_STATE].invert = invert;
+ break;
+ case 14:
+ // --ttl matched
+ check_inverse(optarg, &invert, &optind, argc);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "no ! with --ttl allowed");
+ parse_ttls(argv[optind-1], rule);
+ break;
+ case 15:
+ // --icmp-type matched
+ check_inverse(optarg, &invert, &optind, argc);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "no ! with --icmp-type allowed");
+ set_option(&options, 0, DIMID_ICMP_TYPE);
+ parse_icmp(argv[optind-1], rule);
+ break;
+ case 16:
+ // --source-port matched
+ if (rule[DIMID_PROTO].invert)
+ exit_error(PARAMETER_PROBLEM,
+ "--sport only works with "
+ "protocols tcp or udp, so you can't"
+ " use ! on the protocol field");
+ check_inverse(optarg, &invert, &optind, argc);
+ if ((rule[DIMID_PROTO].left == IPPROTO_TCP &&
+ rule[DIMID_PROTO].right == IPPROTO_TCP) ||
+ (rule[DIMID_PROTO].left == IPPROTO_UDP &&
+ rule[DIMID_PROTO].right == IPPROTO_UDP)){
+ parse_ports(argv[optind-1],
+ DIMID_SPORT, rule);
+ rule[DIMID_SPORT].invert = invert;
+ } else exit_error(PARAMETER_PROBLEM,
+ "Can't use --sport without "
+ "protocol. You have to specify "
+ "protocol (tcp or udp) first");
+
+ break;
+ case 17:
+ // --dest-port matched
+ if (rule[DIMID_PROTO].invert)
+ exit_error(PARAMETER_PROBLEM,
+ "--dport only works with "
+ "protocols tcp or udp, so you can't"
+ " use ! on the protocol field");
+ check_inverse(optarg, &invert, &optind, argc);
+ if ((rule[DIMID_PROTO].left == IPPROTO_TCP &&
+ rule[DIMID_PROTO].right == IPPROTO_TCP) ||
+ (rule[DIMID_PROTO].left == IPPROTO_UDP &&
+ rule[DIMID_PROTO].right == IPPROTO_UDP)){
+ parse_ports(argv[optind-1],
+ DIMID_DPORT, rule);
+ rule[DIMID_DPORT].invert = invert;
+ } else exit_error(PARAMETER_PROBLEM,
+ "Can't use --dport without "
+ "protocol. You have to specify "
+ "protocol (tcp or udp) first");
+ break;
+ case 18:
+ // --syn matched
+ check_inverse(optarg, &invert, &optind, argc);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "no ! with --syn allowed");
+ set_option(&options, 0, DIMID_SYN);
+ parse_syn("SYN", rule);
+ break;
+ case 19:
+ // --not-syn matched
+ check_inverse(optarg, &invert, &optind, argc);
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "no ! with --not-syn allowed");
+ set_option(&options, 0, DIMID_SYN);
+ parse_syn("NOT-SYN", rule);
+ break;
+ case 1:
+ if (optarg[0] == '!' && optarg[1] == '\0') {
+ if (invert)
+ exit_error(PARAMETER_PROBLEM,
+ "multiple consecutive ! not"
+ " allowed");
+ invert = TRUE;
+ optarg[0] = '\0';
+ continue;
+ }
+ printf("Bad argument `%s'\n", optarg);
+ exit_tryhelp(2);
+ break;
+ default: /* non option */
+ if (!ipt_target
+ || !(ipt_target->parse(c -
+ ipt_target->option_offset,
+ argv, invert,
+ &ipt_target->tflags,
+ message->rule.e,
+ &ipt_target->t))) {
+ for (matchp = matches; matchp;
+ matchp = matchp->next) {
+ if (matchp->match
+ ->parse(c - matchp->match
+ ->option_offset,
+ argv, invert,
+ &matchp->match->mflags,
+ message->rule.e,
+ &message->rule.e->nfcache,
+ &matchp->match->m))
+ break;
+ }
+ m = matchp ? matchp->match : NULL;
+
+ if (!m)
+ exit_error(PARAMETER_PROBLEM,
+ "Unknown arg `%s'",
+ argv[optind-1]);
+ }
+
+
+ }
+ invert = FALSE;
+ }
+
+ for (matchp = matches; matchp; matchp = matchp->next) {
+ ipt_match_num++;
+ matchp->match->final_check(matchp->match->mflags);
+ }
+
+ if (ipt_target)
+ ipt_target->final_check(ipt_target->tflags);
+
+ if (optind < argc)
+ exit_error(PARAMETER_PROBLEM,
+ "unknown arguments found on commandline");
+
+ if (!message->cmd)
+ exit_error(PARAMETER_PROBLEM, "no command specified");
+
+ generic_opt_check(message->cmd, options);
+
+ if ((ns_addrs > 1) && (rule[DIMID_SRC_IP].invert))
+ exit_error(PARAMETER_PROBLEM, "! not allowed with multiple"
+ " source or destination IP addresses");
+
+ if ((nd_addrs > 1) && (rule[DIMID_DEST_IP].invert))
+ exit_error(PARAMETER_PROBLEM, "! not allowed with multiple"
+ " source or destination IP addresses");
+
+ if (message->cmd == CMD_REPLACE && (ns_addrs > 1 || nd_addrs > 1))
+ exit_error(PARAMETER_PROBLEM, "Replacement rule does not "
+ "specify a unique address");
+
+ if (message->cmd == CMD_APPEND
+ || message->cmd == CMD_DELETE_RULE
+ || message->cmd == CMD_DELETE_POS
+ || message->cmd == CMD_INSERT
+ || message->cmd == CMD_REPLACE) {
+
+
+ if (strcasecmp(message->chain.label, "INPUT") == 0) {
+ /* -o not valid with incoming packets. */
+ if (options & dimid_to_option(DIMID_OUTIFACE))
+ exit_error(PARAMETER_PROBLEM,
+ "Can't use -%s with %s",
+ optflags[NUMBER_OF_OPT
+ + DIMID_OUTIFACE],
+ message->chain.label);
+ }
+
+ if (strcasecmp(message->chain.label, "OUTPUT") == 0) {
+ /* -i not valid with outgoing packets */
+ if (options & dimid_to_option(DIMID_INIFACE))
+ exit_error(PARAMETER_PROBLEM,
+ "Can't use -%s with %s",
+ optflags[NUMBER_OF_OPT +
+ DIMID_INIFACE],
+ message->chain.label);
+ }
+
+ if ((options & dimid_to_option(DIMID_SYN))
+ && !((options & dimid_to_option(DIMID_PROTO)) &&
+ (rule[DIMID_PROTO].left == IPPROTO_TCP) &&
+ (rule[DIMID_PROTO].right == IPPROTO_TCP) &&
+ (rule[DIMID_PROTO].invert == 0)))
+ exit_error(PARAMETER_PROBLEM,
+ "Can use --syn or --not-syn only "
+ "with protocol tcp");
+
+ if ((options & dimid_to_option(DIMID_ICMP_TYPE))
+ && !((options & dimid_to_option(DIMID_PROTO)) &&
+ (rule[DIMID_PROTO].left == IPPROTO_ICMP) &&
+ (rule[DIMID_PROTO].right == IPPROTO_ICMP) &&
+ (rule[DIMID_PROTO].invert == 0)))
+ exit_error(PARAMETER_PROBLEM,
+ "Can use --icmp-type only "
+ "with protocol icmp");
+
+ if ((options & dimid_to_option(DIMID_SYN))
+ || (options & dimid_to_option(DIMID_SPORT))
+ || (options & dimid_to_option(DIMID_DPORT))
+ || (options & dimid_to_option(DIMID_ICMP_TYPE))) {
+ if (options & dimid_to_option(DIMID_FRAGMENT)) {
+ if (rule[DIMID_FRAGMENT].left == 1){
+ exit_error(PARAMETER_PROBLEM,
+ "syn, port and "
+ "icmp type matches are "
+ "not allowed on "
+ "fragments");
+ }
+ } else {
+ set_option(&options, 0, DIMID_FRAGMENT);
+ rule[DIMID_FRAGMENT].left = 0;
+ rule[DIMID_FRAGMENT].right = 0;
+ }
+ }
+ }
+
+ if (message->cmd != CMD_LIST){
+ int i, j;
+ int cmd_size = 0;
+ struct nfhp_cmd* new_message = NULL;
+ if (HAS_CHAIN_TARGET(&message->rule.r)){
+ strcpy((void *) message->rule.e
+ + IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size, jumpto);
+ cmd_size = sizeof(struct nfhp_cmd)
+ + IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size
+ + strlen(jumpto) + 1;
+ message->rule.e->target_offset =
+ IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size;
+ message->rule.e->next_offset =
+ IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size
+ + strlen(jumpto) + 1;
+ } else if (HAS_IPT_TARGET(&message->rule.r)){
+ } else if (ipt_match_num){
+ cmd_size = sizeof(struct nfhp_cmd)
+ + IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size;
+ message->rule.e->target_offset =
+ IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size;
+ message->rule.e->next_offset =
+ IPT_ALIGN(sizeof(struct ipt_entry))
+ + ipt_matches_size;
+ } else {
+ cmd_size = sizeof(struct nfhp_cmd);
+ }
+
+ message->rule.r.match_offset = ipt_matches_size;
+
+ if ((message->cmd == CMD_DELETE_RULE)
+ && HAS_IPT_ENTRY(&message->rule.r)){
+ u_int16_t *cmp;
+ struct ipt_entry_match *k;
+ cmp = (void *) message->rule.e
+ + message->rule.e->next_offset;
+ k = (void *) message->rule.e
+ + IPT_ALIGN(sizeof(struct ipt_entry));
+ cmd_size += ipt_match_num * (sizeof(u_int16_t));
+ for (i = 0; i < ipt_match_num; i++){
+ for (matchp = matches; matchp;
+ matchp = matchp->next) {
+ //if (!matchp->match->used)
+ //continue;
+ //fixme: NULL AHNUNG
+ if (strcmp(k->u.user.name,
+ matchp->match->name) == 0){
+ cmp[i] = matchp->
+ match->userspacesize;
+ k = NEXT_IPT_MATCH(k);
+ break;
+ }
+ }
+ }
+ if (HAS_IPT_TARGET(&message->rule.r)){
+ }
+ }
+
+ if ((options & OPT_DEBUG) ||
+ (options & OPT_DEBUGN)){
+ print_header(message->chain.label,
+ message->chain.policy);
+ }
+
+ if (ns_addrs < 1) ns_addrs = 1;
+ if (nd_addrs < 1) nd_addrs = 1;
+ for (i = 0; i < ns_addrs; i++)
+ for (j = 0; j < nd_addrs; j++){
+ if (s_ips)
+ rule[DIMID_SRC_IP].left =
+ rule[DIMID_SRC_IP].right =
+ (s_ips[i]).s_addr;
+ if (d_ips)
+ rule[DIMID_DEST_IP].left =
+ rule[DIMID_DEST_IP].right =
+ (d_ips[j]).s_addr;
+
+ fill_hipac_matches(message, rule);
+
+ if (((i + 1) * (j + 1))
+ < (ns_addrs * nd_addrs)){
+ new_message =
+ nlhp_copy_cmd(message,
+ cmd_size);
+					if (!new_message) {
+						perror("nf-hipac: low memory");
+						exit(1);
+					}
+ }
+
+ if ((options & OPT_DEBUG) ||
+ (options & OPT_DEBUGN)){
+ print_rule((struct nfhp_list_rule *)
+ &(message->rule), 1);
+ nlhp_free_cmd(message);
+ } else {
+ if (options & OPT_VERBOSE)
+ print_rule(
+ (struct
+ nfhp_list_rule *)
+ &(message->rule), 1);
+ cmd_return = nlhp_send_cmd(message,
+ cmd_size,
+ NULL, &err);
+ nlhp_free_cmd(message);
+ ret = handle_error(cmd_return, err);
+ if (ret != 0)
+ return ret;
+ }
+ message = new_message;
+ }
+ } else {
+ listing_start = 1;
+ cmd_return = nlhp_send_cmd(message, sizeof(struct nfhp_cmd),
+ process_list, &err);
+ ret = handle_error(cmd_return, err);
+ }
+
+ clear_rule_matches(&matches);
+
+ if (s_ips)
+ free(s_ips);
+ if (d_ips)
+ free(d_ips);
+
+ free_opts(1);
+
+ options = 0;
+
+ return ret;
+}
+
diff -urN nf-hipac/user/nfhp_com.h nfhipac/user/nfhp_com.h
--- nf-hipac/user/nfhp_com.h 1970-01-01 08:00:00.000000000 +0800
+++ nfhipac/user/nfhp_com.h 2014-11-21 12:41:57.000000000 +0800
@@ -0,0 +1,330 @@
+/*
+ * High performance packet classification
+ *
+ *
+ * (c) 2004-2005 MARA Systems AB