diff --git a/libs/libks/.gitignore b/libs/libks/.gitignore new file mode 100644 index 0000000000..740a5af167 --- /dev/null +++ b/libs/libks/.gitignore @@ -0,0 +1,10 @@ +Makefile +Makefile.in +build/compile +build/libtool.m4 +build/ltoptions.m4 +build/ltsugar.m4 +build/ltversion.m4 +build/lt~obsolete.m4 +dht-example.id +configure diff --git a/libs/libks/AUTHORS b/libs/libks/AUTHORS new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libs/libks/COPYING b/libs/libks/COPYING new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libs/libks/ChangeLog b/libs/libks/ChangeLog new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libs/libks/INSTALL b/libs/libks/INSTALL new file mode 100644 index 0000000000..2099840756 --- /dev/null +++ b/libs/libks/INSTALL @@ -0,0 +1,370 @@ +Installation Instructions +************************* + +Copyright (C) 1994-1996, 1999-2002, 2004-2013 Free Software Foundation, +Inc. + + Copying and distribution of this file, with or without modification, +are permitted in any medium without royalty provided the copyright +notice and this notice are preserved. This file is offered as-is, +without warranty of any kind. + +Basic Installation +================== + + Briefly, the shell command `./configure && make && make install' +should configure, build, and install this package. The following +more-detailed instructions are generic; see the `README' file for +instructions specific to this package. Some packages provide this +`INSTALL' file but do not implement all of the features documented +below. The lack of an optional feature in a given package is not +necessarily a bug. More recommendations for GNU packages can be found +in *note Makefile Conventions: (standards)Makefile Conventions. + + The `configure' shell script attempts to guess correct values for +various system-dependent variables used during compilation. It uses +those values to create a `Makefile' in each directory of the package. +It may also create one or more `.h' files containing system-dependent +definitions. Finally, it creates a shell script `config.status' that +you can run in the future to recreate the current configuration, and a +file `config.log' containing compiler output (useful mainly for +debugging `configure'). + + It can also use an optional file (typically called `config.cache' +and enabled with `--cache-file=config.cache' or simply `-C') that saves +the results of its tests to speed up reconfiguring. Caching is +disabled by default to prevent problems with accidental use of stale +cache files. + + If you need to do unusual things to compile the package, please try +to figure out how `configure' could check whether to do them, and mail +diffs or instructions to the address given in the `README' so they can +be considered for the next release. If you are using the cache, and at +some point `config.cache' contains results you don't want to keep, you +may remove or edit it. + + The file `configure.ac' (or `configure.in') is used to create +`configure' by a program called `autoconf'. You need `configure.ac' if +you want to change it or regenerate `configure' using a newer version +of `autoconf'. + + The simplest way to compile this package is: + + 1. `cd' to the directory containing the package's source code and type + `./configure' to configure the package for your system. + + Running `configure' might take a while. While running, it prints + some messages telling which features it is checking for. + + 2. Type `make' to compile the package. + + 3. 
Optionally, type `make check' to run any self-tests that come with + the package, generally using the just-built uninstalled binaries. + + 4. Type `make install' to install the programs and any data files and + documentation. When installing into a prefix owned by root, it is + recommended that the package be configured and built as a regular + user, and only the `make install' phase executed with root + privileges. + + 5. Optionally, type `make installcheck' to repeat any self-tests, but + this time using the binaries in their final installed location. + This target does not install anything. Running this target as a + regular user, particularly if the prior `make install' required + root privileges, verifies that the installation completed + correctly. + + 6. You can remove the program binaries and object files from the + source code directory by typing `make clean'. To also remove the + files that `configure' created (so you can compile the package for + a different kind of computer), type `make distclean'. There is + also a `make maintainer-clean' target, but that is intended mainly + for the package's developers. If you use it, you may have to get + all sorts of other programs in order to regenerate files that came + with the distribution. + + 7. Often, you can also type `make uninstall' to remove the installed + files again. In practice, not all packages have tested that + uninstallation works correctly, even though it is required by the + GNU Coding Standards. + + 8. Some packages, particularly those that use Automake, provide `make + distcheck', which can by used by developers to test that all other + targets like `make install' and `make uninstall' work correctly. + This target is generally not run by end users. + +Compilers and Options +===================== + + Some systems require unusual options for compilation or linking that +the `configure' script does not know about. Run `./configure --help' +for details on some of the pertinent environment variables. + + You can give `configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here +is an example: + + ./configure CC=c99 CFLAGS=-g LIBS=-lposix + + *Note Defining Variables::, for more details. + +Compiling For Multiple Architectures +==================================== + + You can compile the package for more than one kind of computer at the +same time, by placing the object files for each architecture in their +own directory. To do this, you can use GNU `make'. `cd' to the +directory where you want the object files and executables to go and run +the `configure' script. `configure' automatically checks for the +source code in the directory that `configure' is in and in `..'. This +is known as a "VPATH" build. + + With a non-GNU `make', it is safer to compile the package for one +architecture at a time in the source code directory. After you have +installed the package for one architecture, use `make distclean' before +reconfiguring for another architecture. + + On MacOS X 10.5 and later systems, you can create libraries and +executables that work on multiple system types--known as "fat" or +"universal" binaries--by specifying multiple `-arch' options to the +compiler but only a single `-arch' option to the preprocessor. 
Like +this: + + ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ + CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ + CPP="gcc -E" CXXCPP="g++ -E" + + This is not guaranteed to produce working output in all cases, you +may have to build one architecture at a time and combine the results +using the `lipo' tool if you have problems. + +Installation Names +================== + + By default, `make install' installs the package's commands under +`/usr/local/bin', include files under `/usr/local/include', etc. You +can specify an installation prefix other than `/usr/local' by giving +`configure' the option `--prefix=PREFIX', where PREFIX must be an +absolute file name. + + You can specify separate installation prefixes for +architecture-specific files and architecture-independent files. If you +pass the option `--exec-prefix=PREFIX' to `configure', the package uses +PREFIX as the prefix for installing programs and libraries. +Documentation and other data files still use the regular prefix. + + In addition, if you use an unusual directory layout you can give +options like `--bindir=DIR' to specify different values for particular +kinds of files. Run `configure --help' for a list of the directories +you can set and what kinds of files go in them. In general, the +default for these options is expressed in terms of `${prefix}', so that +specifying just `--prefix' will affect all of the other directory +specifications that were not explicitly provided. + + The most portable way to affect installation locations is to pass the +correct locations to `configure'; however, many packages provide one or +both of the following shortcuts of passing variable assignments to the +`make install' command line to change installation locations without +having to reconfigure or recompile. + + The first method involves providing an override variable for each +affected directory. For example, `make install +prefix=/alternate/directory' will choose an alternate location for all +directory configuration variables that were expressed in terms of +`${prefix}'. Any directories that were specified during `configure', +but not in terms of `${prefix}', must each be overridden at install +time for the entire installation to be relocated. The approach of +makefile variable overrides for each directory variable is required by +the GNU Coding Standards, and ideally causes no recompilation. +However, some platforms have known limitations with the semantics of +shared libraries that end up requiring recompilation when using this +method, particularly noticeable in packages that use GNU Libtool. + + The second method involves providing the `DESTDIR' variable. For +example, `make install DESTDIR=/alternate/directory' will prepend +`/alternate/directory' before all installation names. The approach of +`DESTDIR' overrides is not required by the GNU Coding Standards, and +does not work on platforms that have drive letters. On the other hand, +it does better at avoiding recompilation issues, and works well even +when some directory options were not specified in terms of `${prefix}' +at `configure' time. + +Optional Features +================= + + If the package supports it, you can cause programs to be installed +with an extra prefix or suffix on their names by giving `configure' the +option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. + + Some packages pay attention to `--enable-FEATURE' options to +`configure', where FEATURE indicates an optional part of the package. 
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE +is something like `gnu-as' or `x' (for the X Window System). The +`README' should mention any `--enable-' and `--with-' options that the +package recognizes. + + For packages that use the X Window System, `configure' can usually +find the X include and library files automatically, but if it doesn't, +you can use the `configure' options `--x-includes=DIR' and +`--x-libraries=DIR' to specify their locations. + + Some packages offer the ability to configure how verbose the +execution of `make' will be. For these packages, running `./configure +--enable-silent-rules' sets the default to minimal output, which can be +overridden with `make V=1'; while running `./configure +--disable-silent-rules' sets the default to verbose, which can be +overridden with `make V=0'. + +Particular systems +================== + + On HP-UX, the default C compiler is not ANSI C compatible. If GNU +CC is not installed, it is recommended to use the following options in +order to use an ANSI C compiler: + + ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" + +and if that doesn't work, install pre-built binaries of GCC for HP-UX. + + HP-UX `make' updates targets which have the same time stamps as +their prerequisites, which makes it generally unusable when shipped +generated files such as `configure' are involved. Use GNU `make' +instead. + + On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot +parse its `<wchar.h>' header file. The option `-nodtk' can be used as +a workaround. If GNU CC is not installed, it is therefore recommended +to try + + ./configure CC="cc" + +and if that doesn't work, try + + ./configure CC="cc -nodtk" + + On Solaris, don't put `/usr/ucb' early in your `PATH'. This +directory contains several dysfunctional programs; working variants of +these programs are available in `/usr/bin'. So, if you need `/usr/ucb' +in your `PATH', put it _after_ `/usr/bin'. + + On Haiku, software installed for all users goes in `/boot/common', +not `/usr/local'. It is recommended to use the following options: + + ./configure --prefix=/boot/common + +Specifying the System Type +========================== + + There may be some features `configure' cannot figure out +automatically, but needs to determine by the type of machine the package +will run on. Usually, assuming the package is built to be run on the +_same_ architectures, `configure' can figure that out, but if it prints +a message saying it cannot guess the machine type, give it the +`--build=TYPE' option. TYPE can either be a short name for the system +type, such as `sun4', or a canonical name which has the form: + + CPU-COMPANY-SYSTEM + +where SYSTEM can have one of these forms: + + OS + KERNEL-OS + + See the file `config.sub' for the possible values of each field. If +`config.sub' isn't included in this package, then this package doesn't +need to know the machine type. + + If you are _building_ compiler tools for cross-compiling, you should +use the option `--target=TYPE' to select the type of system they will +produce code for. + + If you want to _use_ a cross compiler, that generates code for a +platform different from the build platform, you should specify the +"host" platform (i.e., that on which the generated programs will +eventually be run) with `--host=TYPE'. 
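As a concrete illustration (the target triplet and compiler name here are only examples, not something this package or the patch prescribes), a cross build for 32-bit ARM Linux from an x86_64 build machine might be configured as:

     ./configure --build=x86_64-pc-linux-gnu --host=arm-linux-gnueabihf \
                 CC=arm-linux-gnueabihf-gcc

With current Autoconf, giving `--host' a value different from the build system is what switches `configure' into cross-compilation mode; `--build' can usually be omitted because `config.guess' detects it.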
+ +Sharing Defaults +================ + + If you want to set default values for `configure' scripts to share, +you can create a site shell script called `config.site' that gives +default values for variables like `CC', `cache_file', and `prefix'. +`configure' looks for `PREFIX/share/config.site' if it exists, then +`PREFIX/etc/config.site' if it exists. Or, you can set the +`CONFIG_SITE' environment variable to the location of the site script. +A warning: not all `configure' scripts look for a site script. + +Defining Variables +================== + + Variables not defined in a site shell script can be set in the +environment passed to `configure'. However, some packages may run +configure again during the build, and the customized values of these +variables may be lost. In order to avoid this problem, you should set +them in the `configure' command line, using `VAR=value'. For example: + + ./configure CC=/usr/local2/bin/gcc + +causes the specified `gcc' to be used as the C compiler (unless it is +overridden in the site shell script). + +Unfortunately, this technique does not work for `CONFIG_SHELL' due to +an Autoconf limitation. Until the limitation is lifted, you can use +this workaround: + + CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash + +`configure' Invocation +====================== + + `configure' recognizes the following options to control how it +operates. + +`--help' +`-h' + Print a summary of all of the options to `configure', and exit. + +`--help=short' +`--help=recursive' + Print a summary of the options unique to this package's + `configure', and exit. The `short' variant lists options used + only in the top level, while the `recursive' variant lists options + also present in any nested packages. + +`--version' +`-V' + Print the version of Autoconf used to generate the `configure' + script, and exit. + +`--cache-file=FILE' + Enable the cache: use and save the results of the tests in FILE, + traditionally `config.cache'. FILE defaults to `/dev/null' to + disable caching. + +`--config-cache' +`-C' + Alias for `--cache-file=config.cache'. + +`--quiet' +`--silent' +`-q' + Do not print messages saying which checks are being made. To + suppress all normal output, redirect it to `/dev/null' (any error + messages will still be shown). + +`--srcdir=DIR' + Look for the package's source code in directory DIR. Usually + `configure' can determine that directory automatically. + +`--prefix=DIR' + Use DIR as the installation prefix. *note Installation Names:: + for more details, including other options available for fine-tuning + the installation locations. + +`--no-create' +`-n' + Run the configure checks, but stop before creating any output + files. + +`configure' also accepts some other, not widely useful, options. Run +`configure --help' for more details. diff --git a/libs/libks/Makefile.am b/libs/libks/Makefile.am new file mode 100644 index 0000000000..b04069b419 --- /dev/null +++ b/libs/libks/Makefile.am @@ -0,0 +1,36 @@ +ACLOCAL_AMFLAGS=-I build +EXTRA_DIST = +SUBDIRS = . 
test +AUTOMAKE_OPTIONS = subdir-objects + +AM_CFLAGS += -I$(top_srcdir)/src -I$(top_srcdir)/src/include -I$(top_srcdir)/crypt +AM_CPPFLAGS = $(AM_CFLAGS) + +lib_LTLIBRARIES = libks.la +libks_la_SOURCES = src/ks.c src/ks_string.c src/ks_json.c src/ks_thread.c src/ks_mutex.c src/ks_config.c +libks_la_SOURCES += src/ks_log.c src/ks_socket.c src/ks_buffer.c src/ks_pool.c src/simclist.c +libks_la_SOURCES += src/ks_time.c src/ks_printf.c src/ks_hash.c src/ks_q.c src/ks_dso.c src/ks_dht.c +libks_la_SOURCES += src/ks_ssl.c src/kws.c src/ks_rng.c +libks_la_SOURCES += src/utp/utp_api.cpp src/utp/utp_callbacks.cpp src/utp/utp_hash.cpp src/utp/utp_internal.cpp +libks_la_SOURCES += src/utp/utp_packedsockaddr.cpp src/utp/utp_utils.cpp src/ks_bencode.c +libks_la_SOURCES += crypt/aeskey.c crypt/aestab.c crypt/sha2.c crypt/twofish.c crypt/aes_modes.c crypt/aescrypt.c crypt/twofish_cfb.c +#aes.h aescpp.h brg_endian.h aesopt.h aestab.h brg_types.h sha2.h twofish.h + +libks_la_CFLAGS = $(AM_CFLAGS) +libks_la_CPPFLAGS = -DPOSIX +libks_la_LDFLAGS = $(AM_LDFLAGS) -version-info 0:1:0 -lncurses -lpthread -lm + +library_includedir = $(prefix)/include +library_include_HEADERS = src/include/ks_config.h src/include/ks.h src/include/ks_threadmutex.h src/include/ks_json.h src/include/ks_buffer.h +library_include_HEADERS += src/include/ks_pool.h src/include/simclist.h src/include/ks_time.h src/include/ks_q.h src/include/ks_socket.h +library_include_HEADERS += src/include/ks_dso.h src/include/ks_dht.h src/include/ks_platform.h src/include/ks_types.h # src/include/ks_rng.h +library_include_HEADERS += src/include/ks_printf.h src/include/ks_hash.h src/include/ks_ssl.h src/include/kws.h +library_include_HEADERS += src/utp/utp_internal.h src/utp/utp.h src/utp/utp_types.h src/utp/utp_callbacks.h src/utp/utp_templates.h +library_include_HEADERS += src/utp/utp_hash.h src/utp/utp_packedsockaddr.h src/utp/utp_utils.h src/include/ks_utp.h + +tests: libks.la + $(MAKE) -C test tests + + + + diff --git a/libs/libks/NEWS b/libs/libks/NEWS new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libs/libks/README b/libs/libks/README new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libs/libks/acinclude.m4 b/libs/libks/acinclude.m4 new file mode 100644 index 0000000000..1b464ca796 --- /dev/null +++ b/libs/libks/acinclude.m4 @@ -0,0 +1,7 @@ +m4_include([build/config/ax_compiler_vendor.m4]) +m4_include([build/config/ax_cflags_warn_all_ansi.m4]) +m4_include([build/config/ax_cc_maxopt.m4]) +m4_include([build/config/ax_check_compiler_flags.m4]) +m4_include([build/config/ac_gcc_archflag.m4]) +m4_include([build/config/ac_gcc_x86_cpuid.m4]) +m4_include([build/config/sac-openssl.m4]) diff --git a/libs/libks/bootstrap.sh b/libs/libks/bootstrap.sh new file mode 100755 index 0000000000..e8ca45f592 --- /dev/null +++ b/libs/libks/bootstrap.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +set -x +${AUTORECONF:-autoreconf} -fi diff --git a/libs/libks/build/config/ac_cflags_gcc_option.m4 b/libs/libks/build/config/ac_cflags_gcc_option.m4 new file mode 100644 index 0000000000..e651a5e6c5 --- /dev/null +++ b/libs/libks/build/config/ac_cflags_gcc_option.m4 @@ -0,0 +1,142 @@ +AC_DEFUN([AX_CFLAGS_GCC_OPTION_OLD], [dnl +AS_VAR_PUSHDEF([FLAGS],[CFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cflags_gcc_option_$2])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for gcc m4_ifval($2,$2,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_C + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-pedantic % m4_ifval($2,$2,-option)" dnl GCC + # +do FLAGS="$ac_save_[]FLAGS 
"`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($1,$1,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($1,$1,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + + +dnl the only difference - the LANG selection... and the default FLAGS + +AC_DEFUN([AX_CXXFLAGS_GCC_OPTION_OLD], [dnl +AS_VAR_PUSHDEF([FLAGS],[CXXFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cxxflags_gcc_option_$2])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for gcc m4_ifval($2,$2,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_CXX + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-pedantic % m4_ifval($2,$2,-option)" dnl GCC + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($1,$1,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($1,$1,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +dnl ------------------------------------------------------------------------- + +AC_DEFUN([AX_CFLAGS_GCC_OPTION_NEW], [dnl +AS_VAR_PUSHDEF([FLAGS],[CFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cflags_gcc_option_$1])dnl +AC_CACHE_CHECK([m4_ifval($2,$2,FLAGS) for gcc m4_ifval($1,$1,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_C + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-pedantic % m4_ifval($1,$1,-option)" dnl GCC + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($2,$2,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($2,$2,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR"]) + m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + + +dnl the only difference - the LANG selection... 
and the default FLAGS + +AC_DEFUN([AX_CXXFLAGS_GCC_OPTION_NEW], [dnl +AS_VAR_PUSHDEF([FLAGS],[CXXFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cxxflags_gcc_option_$1])dnl +AC_CACHE_CHECK([m4_ifval($2,$2,FLAGS) for gcc m4_ifval($1,$1,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_CXX + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "-pedantic % m4_ifval($1,$1,-option)" dnl GCC + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($2,$2,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($2,$2,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR"]) + m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +AC_DEFUN([AX_CFLAGS_GCC_OPTION],[ifelse(m4_bregexp([$2],[-]),-1, +[AX_CFLAGS_GCC_OPTION_NEW($@)],[AX_CFLAGS_GCC_OPTION_OLD($@)])]) + +AC_DEFUN([AX_CXXFLAGS_GCC_OPTION],[ifelse(m4_bregexp([$2],[-]),-1, +[AX_CXXFLAGS_GCC_OPTION_NEW($@)],[AX_CXXFLAGS_GCC_OPTION_OLD($@)])]) + diff --git a/libs/libks/build/config/ac_cflags_sun_option.m4 b/libs/libks/build/config/ac_cflags_sun_option.m4 new file mode 100644 index 0000000000..a09e6fb695 --- /dev/null +++ b/libs/libks/build/config/ac_cflags_sun_option.m4 @@ -0,0 +1,140 @@ +AC_DEFUN([AX_CFLAGS_SUN_OPTION_OLD], [dnl +AS_VAR_PUSHDEF([FLAGS],[CFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cflags_sun_option_$2])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for sun/cc m4_ifval($2,$2,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_C + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "+xstrconst % -xc99=all m4_ifval($2,$2,-option)" dnl Solaris C + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($1,$1,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($1,$1,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +dnl the only difference - the LANG selection... 
and the default FLAGS + +AC_DEFUN([AX_CXXFLAGS_SUN_OPTION_OLD], [dnl +AS_VAR_PUSHDEF([FLAGS],[CXXFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cxxflags_sun_option_$2])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for sun/cc m4_ifval($2,$2,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_CXX + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "+xstrconst % -xc99=all m4_ifval($2,$2,-option)" dnl Solaris C + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($1,$1,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($1,$1,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +dnl ----------------------------------------------------------------------- + +AC_DEFUN([AX_CFLAGS_SUN_OPTION_NEW], [dnl +AS_VAR_PUSHDEF([FLAGS],[CFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cflags_sun_option_$1])dnl +AC_CACHE_CHECK([m4_ifval($2,$2,FLAGS) for sun/cc m4_ifval($1,$1,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_C + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "+xstrconst % -xc99=all m4_ifval($1,$1,-option)" dnl Solaris C + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($2,$2,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($2,$2,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR"]) + m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +dnl the only difference - the LANG selection... 
and the default FLAGS + +AC_DEFUN([AX_CXXFLAGS_SUN_OPTION_NEW], [dnl +AS_VAR_PUSHDEF([FLAGS],[CXXFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cxxflags_sun_option_$1])dnl +AC_CACHE_CHECK([m4_ifval($2,$2,FLAGS) for sun/cc m4_ifval($1,$1,-option)], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_CXX + ac_save_[]FLAGS="$[]FLAGS" +for ac_arg dnl +in "+xstrconst % -xc99=all m4_ifval($1,$1,-option)" dnl Solaris C + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($2,$2,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($2,$2,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR"]) + m4_ifval($2,$2,FLAGS)="$m4_ifval($2,$2,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +AC_DEFUN([AX_CFLAGS_SUN_OPTION],[ifelse(m4_regexp([$2],[-]),-1, +[AX_CFLAGS_SUN_OPTION_NEW($@)],[AX_CFLAGS_SUN_OPTION_OLD($@)])]) + +AC_DEFUN([AX_CXXFLAGS_SUN_OPTION],[ifelse(m4_regexp([$2],[-]),-1, +[AX_CXXFLAGS_SUN_OPTION_NEW($@)],[AX_CXXFLAGS_SUN_OPTION_OLD($@)])]) + diff --git a/libs/libks/build/config/ac_gcc_archflag.m4 b/libs/libks/build/config/ac_gcc_archflag.m4 new file mode 100644 index 0000000000..b38a564902 --- /dev/null +++ b/libs/libks/build/config/ac_gcc_archflag.m4 @@ -0,0 +1,148 @@ +AC_DEFUN([AX_GCC_ARCHFLAG], +[AC_REQUIRE([AC_PROG_CC]) + +AC_ARG_WITH(gcc-arch, [AC_HELP_STRING([--with-gcc-arch=], [use architecture for gcc -march/-mtune, instead of guessing])], + ax_gcc_arch=$withval, ax_gcc_arch=yes) + +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT([]) +AC_CACHE_VAL(ax_cv_gcc_archflag, +[ +ax_cv_gcc_archflag="unknown" + +if test "$GCC" = yes; then + +if test "x$ax_gcc_arch" = xyes; then +ax_gcc_arch="" +if test "$cross_compiling" = no; then +case $host_cpu in + i[[3456]]86*|x86_64*) # use cpuid codes, in part from x86info-1.7 by D. 
Jones + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in + *:756e6547:*:*) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *5[[48]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;; + *5??:*:*:*) ax_gcc_arch=pentium ;; + *6[[3456]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *6a?:*[[01]]:*:*) ax_gcc_arch="pentium2 pentiumpro" ;; + *6a?:*[[234]]:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *6[[9d]]?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;; + *6[[78b]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;; + *6??:*:*:*) ax_gcc_arch=pentiumpro ;; + *f3[[347]]:*:*:*|*f4[1347]:*:*:*) + case $host_cpu in + x86_64*) ax_gcc_arch="nocona pentium4 pentiumpro" ;; + *) ax_gcc_arch="prescott pentium4 pentiumpro" ;; + esac ;; + *f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro";; + esac ;; + *:68747541:*:*) # AMD + case $ax_cv_gcc_x86_cpuid_1 in + *5[[67]]?:*:*:*) ax_gcc_arch=k6 ;; + *5[[8d]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;; + *5[[9]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;; + *60?:*:*:*) ax_gcc_arch=k7 ;; + *6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;; + *6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;; + *67?:*:*:*) ax_gcc_arch="athlon-4 athlon k7" ;; + *6[[68a]]?:*:*:*) + AX_GCC_X86_CPUID(0x80000006) # L2 cache size + case $ax_cv_gcc_x86_cpuid_0x80000006 in + *:*:*[[1-9a-f]]??????:*) # (L2 = ecx >> 16) >= 256 + ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;; + *) ax_gcc_arch="athlon-4 athlon k7" ;; + esac ;; + *f[[4cef8b]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;; + *f5?:*:*:*) ax_gcc_arch="opteron k8" ;; + *f7?:*:*:*) ax_gcc_arch="athlon-fx opteron k8" ;; + *f??:*:*:*) ax_gcc_arch="k8" ;; + esac ;; + *:746e6543:*:*) # IDT + case $ax_cv_gcc_x86_cpuid_1 in + *54?:*:*:*) ax_gcc_arch=winchip-c6 ;; + *58?:*:*:*) ax_gcc_arch=winchip2 ;; + *6[[78]]?:*:*:*) ax_gcc_arch=c3 ;; + *69?:*:*:*) ax_gcc_arch="c3-2 c3" ;; + esac ;; + esac + if test x"$ax_gcc_arch" = x; then # fallback + case $host_cpu in + i586*) ax_gcc_arch=pentium ;; + i686*) ax_gcc_arch=pentiumpro ;; + esac + fi + ;; + + sparc*) + AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/]) + cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null` + cputype=`echo "$cputype" | tr -d ' -' |tr $as_cr_LETTERS $as_cr_letters` + case $cputype in + *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;; + *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;; + *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;; + *supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;; + *hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;; + *cypress*) ax_gcc_arch=cypress ;; + esac ;; + + alphaev5) ax_gcc_arch=ev5 ;; + alphaev56) ax_gcc_arch=ev56 ;; + alphapca56) ax_gcc_arch="pca56 ev56" ;; + alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;; + alphaev6) ax_gcc_arch=ev6 ;; + alphaev67) ax_gcc_arch=ev67 ;; + alphaev68) ax_gcc_arch="ev68 ev67" ;; + alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;; + alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;; + alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;; + + powerpc*) + cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | sed 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null` + cputype=`echo $cputype | sed -e 's/ppc//g;s/ *//g'` + case $cputype in + *750*) ax_gcc_arch="750 G3" ;; + *740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;; + *74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;; + 
*74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;; + *970*) ax_gcc_arch="970 G5 power4";; + *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";; + *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";; + 603ev|8240) ax_gcc_arch="$cputype 603e 603";; + *) ax_gcc_arch=$cputype ;; + esac + ax_gcc_arch="$ax_gcc_arch powerpc" + ;; +esac +fi # not cross-compiling +fi # guess arch + +if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then +for arch in $ax_gcc_arch; do + if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code + flags="-mtune=$arch" + # -mcpu=$arch and m$arch generate nonportable code on every arch except + # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr. + case $host_cpu in i*86|x86_64*) flags="$flags -mcpu=$arch -m$arch";; esac + else + flags="-march=$arch -mcpu=$arch -m$arch" + fi + for flag in $flags; do + AX_CHECK_COMPILER_FLAGS($flag, [ax_cv_gcc_archflag=$flag; break]) + done + test "x$ax_cv_gcc_archflag" = xunknown || break +done +fi + +fi # $GCC=yes +]) +AC_MSG_CHECKING([for gcc architecture flag]) +AC_MSG_RESULT($ax_cv_gcc_archflag) +if test "x$ax_cv_gcc_archflag" = xunknown; then + m4_default([$3],:) +else + m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"]) +fi +]) + diff --git a/libs/libks/build/config/ac_gcc_x86_cpuid.m4 b/libs/libks/build/config/ac_gcc_x86_cpuid.m4 new file mode 100644 index 0000000000..3cf22d0dde --- /dev/null +++ b/libs/libks/build/config/ac_gcc_x86_cpuid.m4 @@ -0,0 +1,21 @@ +AC_DEFUN([AX_GCC_X86_CPUID], +[AC_REQUIRE([AC_PROG_CC]) +AC_LANG_PUSH([C]) +AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1, + [AC_RUN_IFELSE([AC_LANG_PROGRAM([#include ], [ + int op = $1, eax, ebx, ecx, edx; + FILE *f; + __asm__("cpuid" + : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) + : "a" (op)); + f = fopen("conftest_cpuid", "w"); if (!f) return 1; + fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx); + fclose(f); + return 0; +])], + [ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid], + [ax_cv_gcc_x86_cpuid_$1=unknown])]) +AC_LANG_POP([C]) +]) + diff --git a/libs/libks/build/config/ac_prog_gzip.m4 b/libs/libks/build/config/ac_prog_gzip.m4 new file mode 100644 index 0000000000..f37a4cc9ce --- /dev/null +++ b/libs/libks/build/config/ac_prog_gzip.m4 @@ -0,0 +1,9 @@ +AC_DEFUN([AC_PROG_GZIP],[ +AC_CHECK_PROGS(gzip,[gzip],no) +export gzip; +if test $gzip = "no" ; +then + AC_MSG_ERROR([Unable to find the gzip application]); +fi +AC_SUBST(gzip) +]) diff --git a/libs/libks/build/config/ac_prog_wget.m4 b/libs/libks/build/config/ac_prog_wget.m4 new file mode 100644 index 0000000000..56b6b8334f --- /dev/null +++ b/libs/libks/build/config/ac_prog_wget.m4 @@ -0,0 +1,9 @@ +AC_DEFUN([AC_PROG_WGET],[ +AC_CHECK_PROGS(wget,[wget],no) +export wget; +if test $wget = "no" ; +then + AC_MSG_ERROR([Unable to find the wget application]); +fi +AC_SUBST(wget) +]) diff --git a/libs/libks/build/config/ax_cc_maxopt.m4 b/libs/libks/build/config/ax_cc_maxopt.m4 new file mode 100644 index 0000000000..6205ee84c8 --- /dev/null +++ b/libs/libks/build/config/ax_cc_maxopt.m4 @@ -0,0 +1,120 @@ +AC_DEFUN([AX_CC_MAXOPT], +[ +AC_REQUIRE([AC_PROG_CC]) +AC_REQUIRE([AX_COMPILER_VENDOR]) + +AC_ARG_ENABLE(portable-binary, [AC_HELP_STRING([--enable-portable-binary], [disable compiler optimizations that would produce unportable binaries])], + acx_maxopt_portable=$withval, acx_maxopt_portable=no) + +# Try to determine "good" native compiler flags if none specified via CFLAGS +if 
test "$ac_test_CFLAGS" != "set"; then + CFLAGS="" + case $ax_cv_c_compiler_vendor in + dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host" + if test "x$acx_maxopt_portable" = xno; then + CFLAGS="$CFLAGS -arch host" + fi;; + + sun) CFLAGS="-native -fast -xO5 -dalign -xc99=all" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS -xarch=generic" + fi;; + + hp) CFLAGS="+Oall +Optrs_ansi +DSnative" + if test "x$acx_maxopt_portable" = xyes; then + CFLAGS="$CFLAGS +DAportable" + fi;; + + ibm) if test "x$acx_maxopt_portable" = xno; then + xlc_opt="-qarch=auto -qtune=auto" + else + xlc_opt="-qtune=auto" + fi + AX_CHECK_COMPILER_FLAGS($xlc_opt, + CFLAGS="-O3 -qansialias -w $xlc_opt", + [CFLAGS="-O3 -qansialias -w" + echo "******************************************************" + echo "* You seem to have the IBM C compiler. It is *" + echo "* recommended for best performance that you use: *" + echo "* *" + echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *" + echo "* ^^^ ^^^ *" + echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *" + echo "* CPU you have. (Set the CFLAGS environment var. *" + echo "* and re-run configure.) For more info, man cc. *" + echo "******************************************************"]) + ;; + + intel) CFLAGS="-O3 -ansi_alias" + if test "x$acx_maxopt_portable" = xno; then + icc_archflag=unknown + icc_flags="" + case $host_cpu in + i686*|x86_64*) + # icc accepts gcc assembly syntax, so these should work: + AX_GCC_X86_CPUID(0) + AX_GCC_X86_CPUID(1) + case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG + *:756e6547:*:*) # Intel + case $ax_cv_gcc_x86_cpuid_1 in + *6a?:*[[234]]:*:*|*6[[789b]]?:*:*:*) icc_flags="-xK";; + *f3[[347]]:*:*:*|*f4[1347]:*:*:*) icc_flags="-xP -xN -xW -xK";; + *f??:*:*:*) icc_flags="-xN -xW -xK";; + esac ;; + esac ;; + esac + if test "x$icc_flags" != x; then + for flag in $icc_flags; do + AX_CHECK_COMPILER_FLAGS($flag, [icc_archflag=$flag; break]) + done + fi + AC_MSG_CHECKING([for icc architecture flag]) + AC_MSG_RESULT($icc_archflag) + if test "x$icc_archflag" != xunknown; then + CFLAGS="$CFLAGS $icc_archflag" + fi + fi + ;; + + gnu) + # default optimization flags for gcc on all systems + CFLAGS="-O3 -fomit-frame-pointer" + + # -malign-double for x86 systems + AX_CHECK_COMPILER_FLAGS(-malign-double, CFLAGS="$CFLAGS -malign-double") + + # -fstrict-aliasing for gcc-2.95+ + AX_CHECK_COMPILER_FLAGS(-fstrict-aliasing, + CFLAGS="$CFLAGS -fstrict-aliasing") + + # note that we enable "unsafe" fp optimization with other compilers, too + AX_CHECK_COMPILER_FLAGS(-ffast-math, CFLAGS="$CFLAGS -ffast-math") + + AX_GCC_ARCHFLAG($acx_maxopt_portable) + ;; + esac + + if test -z "$CFLAGS"; then + echo "" + echo "********************************************************" + echo "* WARNING: Don't know the best CFLAGS for this system *" + echo "* Use ./configure CFLAGS=... to specify your own flags *" + echo "* (otherwise, a default of CFLAGS=-O3 will be used) *" + echo "********************************************************" + echo "" + CFLAGS="-O3" + fi + + AX_CHECK_COMPILER_FLAGS($CFLAGS, [], [ + echo "" + echo "********************************************************" + echo "* WARNING: The guessed CFLAGS don't seem to work with *" + echo "* your compiler. *" + echo "* Use ./configure CFLAGS=... 
to specify your own flags *" + echo "********************************************************" + echo "" + CFLAGS="" + ]) + +fi +]) diff --git a/libs/libks/build/config/ax_cflags_warn_all_ansi.m4 b/libs/libks/build/config/ax_cflags_warn_all_ansi.m4 new file mode 100644 index 0000000000..5b35464457 --- /dev/null +++ b/libs/libks/build/config/ax_cflags_warn_all_ansi.m4 @@ -0,0 +1,94 @@ +AC_DEFUN([AX_CFLAGS_WARN_ALL_ANSI],[dnl +AS_VAR_PUSHDEF([FLAGS],[CFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cflags_warn_all_ansi])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum ansi warnings], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_C + ac_save_[]FLAGS="$[]FLAGS" +# IRIX C compiler: +# -use_readonly_const is the default for IRIX C, +# puts them into .rodata, but they are copied later. +# need to be "-G0 -rdatashared" for strictmode but +# I am not sure what effect that has really. - guidod +for ac_arg dnl +in "-pedantic % -Wall -std=c99 -pedantic" dnl GCC + "-xstrconst % -v -xc99=all" dnl Solaris C + "-std1 % -verbose -w0 -warnprotos -std1" dnl Digital Unix + " % -qlanglvl=ansi -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX + " % -ansi -ansiE -fullwarn" dnl IRIX + "+ESlit % +w1 -Aa" dnl HP-UX C + "-Xc % -pvctl[,]fullmsg -Xc" dnl NEC SX-5 (Super-UX 10) + "-h conform % -h msglevel 2 -h conform" dnl Cray C (Unicos) + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4,[m4_ifval($2,[ + AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $2"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $2"])]) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($1,$1,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($1,$1,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + +dnl the only difference - the LANG selection... and the default FLAGS + +AC_DEFUN([AX_CXXFLAGS_WARN_ALL_ANSI],[dnl +AS_VAR_PUSHDEF([FLAGS],[CXXFLAGS])dnl +AS_VAR_PUSHDEF([VAR],[ac_cv_cxxflags_warn_all_ansi])dnl +AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum ansi warnings], +VAR,[VAR="no, unknown" + AC_LANG_SAVE + AC_LANG_CXX + ac_save_[]FLAGS="$[]FLAGS" +# IRIX C compiler: +# -use_readonly_const is the default for IRIX C, +# puts them into .rodata, but they are copied later. +# need to be "-G0 -rdatashared" for strictmode but +# I am not sure what effect that has really. 
- guidod +for ac_arg dnl +in "-pedantic % -Wall -ansi -pedantic" dnl GCC + "-xstrconst % -v -Xc" dnl Solaris C + "-std1 % -verbose -w0 -warnprotos -std1" dnl Digital Unix + " % -qlanglvl=ansi -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX + " % -ansi -ansiE -fullwarn" dnl IRIX + "+ESlit % +w1 -Aa" dnl HP-UX C + "-Xc % -pvctl[,]fullmsg -Xc" dnl NEC SX-5 (Super-UX 10) + "-h conform % -h msglevel 2 -h conform" dnl Cray C (Unicos) + # +do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` + AC_TRY_COMPILE([],[return 0;], + [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) +done + FLAGS="$ac_save_[]FLAGS" + AC_LANG_RESTORE +]) +case ".$VAR" in + .ok|.ok,*) m4_ifvaln($3,$3) ;; + .|.no|.no,*) m4_ifvaln($4,$4,[m4_ifval($2,[ + AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $2"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $2"])]) ;; + *) m4_ifvaln($3,$3,[ + if echo " $[]m4_ifval($1,$1,FLAGS) " | grep " $VAR " 2>&1 >/dev/null + then AC_RUN_LOG([: m4_ifval($1,$1,FLAGS) does contain $VAR]) + else AC_RUN_LOG([: m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR"]) + m4_ifval($1,$1,FLAGS)="$m4_ifval($1,$1,FLAGS) $VAR" + fi ]) ;; +esac +AS_VAR_POPDEF([VAR])dnl +AS_VAR_POPDEF([FLAGS])dnl +]) + diff --git a/libs/libks/build/config/ax_check_compiler_flags.m4 b/libs/libks/build/config/ax_check_compiler_flags.m4 new file mode 100644 index 0000000000..73377b7c59 --- /dev/null +++ b/libs/libks/build/config/ax_check_compiler_flags.m4 @@ -0,0 +1,26 @@ +AC_DEFUN([AX_CHECK_COMPILER_FLAGS], +[AC_PREREQ(2.59) dnl for _AC_LANG_PREFIX +AC_MSG_CHECKING([whether _AC_LANG compiler accepts $1]) +dnl Some hackery here since AC_CACHE_VAL can't handle a non-literal varname: +AS_LITERAL_IF([$1], + [AC_CACHE_VAL(AS_TR_SH(ax_cv_[]_AC_LANG_ABBREV[]_flags_$1), [ + ax_save_FLAGS=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$1" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], + AS_TR_SH(ax_cv_[]_AC_LANG_ABBREV[]_flags_$1)=yes, + AS_TR_SH(ax_cv_[]_AC_LANG_ABBREV[]_flags_$1)=no) + _AC_LANG_PREFIX[]FLAGS=$ax_save_FLAGS])], + [ax_save_FLAGS=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$1" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], + eval AS_TR_SH(ax_cv_[]_AC_LANG_ABBREV[]_flags_$1)=yes, + eval AS_TR_SH(ax_cv_[]_AC_LANG_ABBREV[]_flags_$1)=no) + _AC_LANG_PREFIX[]FLAGS=$ax_save_FLAGS]) +eval ax_check_compiler_flags=$AS_TR_SH(ax_cv_[]_AC_LANG_ABBREV[]_flags_$1) +AC_MSG_RESULT($ax_check_compiler_flags) +if test "x$ax_check_compiler_flags" = xyes; then + m4_default([$2], :) +else + m4_default([$3], :) +fi +])dnl AX_CHECK_COMPILER_FLAG diff --git a/libs/libks/build/config/ax_compiler_vendor.m4 b/libs/libks/build/config/ax_compiler_vendor.m4 new file mode 100644 index 0000000000..a24a58da0f --- /dev/null +++ b/libs/libks/build/config/ax_compiler_vendor.m4 @@ -0,0 +1,15 @@ +AC_DEFUN([AX_COMPILER_VENDOR], +[ +AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, + [ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=unknown + # note: don't check for gcc first since some other compilers define __GNUC__ + for ventest in intel:__ICC,__ECC,__INTEL_COMPILER ibm:__xlc__,__xlC__,__IBMC__,__IBMCPP__ gnu:__GNUC__ sun:__SUNPRO_C,__SUNPRO_CC hp:__HP_cc,__HP_aCC dec:__DECC,__DECCXX,__DECC_VER,__DECCXX_VER borland:__BORLANDC__,__TURBOC__ comeau:__COMO__ cray:_CRAYC kai:__KCC lcc:__LCC__ metrowerks:__MWERKS__ sgi:__sgi,sgi microsoft:_MSC_VER watcom:__WATCOMC__ portland:__PGI; do + vencpp="defined("`echo $ventest | cut -d: -f2 | sed 's/,/) || defined(/g'`")" + 
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ +#if !($vencpp) + thisisanerror; +#endif +])], [ax_cv_]_AC_LANG_ABBREV[_compiler_vendor=`echo $ventest | cut -d: -f1`; break]) + done + ]) +]) diff --git a/libs/libks/build/config/sac-openssl.m4 b/libs/libks/build/config/sac-openssl.m4 new file mode 100644 index 0000000000..289d3e132e --- /dev/null +++ b/libs/libks/build/config/sac-openssl.m4 @@ -0,0 +1,49 @@ +dnl ====================================================================== +dnl SAC_OPENSSL +dnl ====================================================================== +AC_DEFUN([SAC_OPENSSL], [ + +AC_ARG_WITH(openssl, +[ --with-openssl use OpenSSL [[enabled]]],, with_openssl=pkg-config) + +dnl SOSXXX:SAC_ASSERT_DEF([openssl libraries]) + + +if test "$with_openssl" = no ;then + : # No openssl +else + + if test "$with_openssl" = "pkg-config" ; then + PKG_CHECK_MODULES(openssl, openssl, + [HAVE_TLS=1 HAVE_OPENSSL=1 LIBS="$openssl_LIBS $LIBS"], + [HAVE_OPENSSL=0]) + fi + + if test x$HAVE_OPENSSL = x1 ; then + AC_DEFINE([HAVE_LIBCRYPTO], 1, [Define to 1 if you have the `crypto' library (-lcrypto).]) + AC_DEFINE([HAVE_LIBSSL], 1, [Define to 1 if you have the `ssl' library (-lssl).]) + else + AC_CHECK_HEADERS([openssl/tls1.h], [ + HAVE_OPENSSL=1 HAVE_TLS=1 + + AC_CHECK_LIB(crypto, BIO_new,, + HAVE_OPENSSL=0 + AC_MSG_WARN(OpenSSL crypto library was not found)) + + AC_CHECK_LIB(ssl, TLSv1_method,, + HAVE_TLS=0 + AC_MSG_WARN(OpenSSL protocol library was not found)) + ],[AC_MSG_WARN(OpenSSL include files were not found)],[#include ]) + fi + + if test x$HAVE_OPENSSL = x1; then + AC_DEFINE([HAVE_OPENSSL], 1, [Define to 1 if you have OpenSSL]) + fi + + if test x$HAVE_TLS = x1; then + AC_DEFINE([HAVE_TLS], 1, [Define to 1 if you have TLS]) + fi +fi + +AM_CONDITIONAL(HAVE_TLS, test x$HAVE_TLS = x1) +]) diff --git a/libs/libks/configure.ac b/libs/libks/configure.ac new file mode 100644 index 0000000000..3ebd6fa2cb --- /dev/null +++ b/libs/libks/configure.ac @@ -0,0 +1,258 @@ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. + +AC_PREREQ(2.59) +AC_INIT(libks, 0.1, bugs@freeswitch.org) +AC_CONFIG_AUX_DIR(build) +AC_CONFIG_MACRO_DIR([build]) +AM_INIT_AUTOMAKE +AC_CONFIG_SRCDIR([src]) + +# disable checks +m4_defun([_LT_AC_LANG_CXX_CONFIG], [:]) +m4_defun([_LT_AC_LANG_F77_CONFIG], [:]) + +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) + +# Checks for programs. 
+AC_PROG_CC +AC_PROG_CXX +AC_PROG_MAKE_SET +AC_PROG_LIBTOOL +AC_PROG_INSTALL + +# Optimize +AC_ARG_ENABLE(optimization, +[AC_HELP_STRING([--enable-optimization],[Set if you want us to add max optimising compiler flags])],[enable_optimizer="$enableval"],[enable_optimizer="no"]) + +if test "${enable_optimizer}" = "yes" ; then + AC_DEFINE([OPTIMZER],[],[Enable Optimization.]) + AX_CC_MAXOPT +fi + +# Enable debugging +AC_ARG_ENABLE(debug, +[AC_HELP_STRING([--enable-debug],[build with debug information])],[enable_debug="$enable_debug"],[enable_debug="no"]) + +if test "${enable_debug}" = "yes"; then + AC_DEFINE([DEBUG],[],[Enable extra debugging.]) +fi + +AM_CONDITIONAL([WANT_DEBUG],[test "${enable_debug}" = "yes"]) + +dnl check for the compiler used +AX_COMPILER_VENDOR + +case "$host" in + *-solaris2*) + if test "x${ax_cv_c_compiler_vendor}" = "xsun" ; then + AM_CFLAGS="-KPIC -DPIC" + AM_LDFLAGS="-R${prefix}/lib" + fi + ;; + *-darwin*) + if test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then + AM_CFLAGS="-DMACOSX" + fi + ;; + x86_64-unknown-linux-gnu) + AM_CFLAGS="-fPIC" + AM_LDFLAGS="" + ;; + i*6-unknown-linux-gnu) + AM_CFLAGS="-fpic" + AM_LDFLAGS="" + ;; + x86_64-*-freebsd*|amd64-*-freebsd*) + AM_CFLAGS="-fpic" + AM_LDFLAGS="" + ;; + i*6-*-freebsd*) + AM_CFLAGS="-fpic" + AM_LDFLAGS="" + ;; +esac + +AX_CFLAGS_WARN_ALL_ANSI + +AC_CHECK_LIB(rt, clock_gettime, [AC_DEFINE(HAVE_CLOCK_GETTIME, 1, [Define if you have clock_gettime()])]) +AC_CHECK_LIB(rt, clock_getres, [AC_DEFINE(HAVE_CLOCK_GETRES, 1, [Define if you have clock_getres()])]) +AC_CHECK_LIB(rt, clock_nanosleep, [AC_DEFINE(HAVE_CLOCK_NANOSLEEP, 1, [Define if you have clock_nanosleep()])]) +AC_CHECK_FUNCS([usleep]) + +# +# sched_setcheduler + round-robin scheduler prerequisites +# +AC_CHECK_HEADERS([sched.h byteswap.h sys/endian.h]) +AC_CHECK_DECL([SCHED_RR], + [AC_DEFINE([HAVE_SCHED_RR],[1],[SCHED_RR constant for sched_setscheduler])],, + [#ifdef HAVE_SCHED_H + #include <sched.h> + #endif]) +AC_CHECK_FUNCS([sched_setscheduler memmem]) + +if test "x${ac_cv_func_sched_setscheduler}" = "xyes" -a \ + "x${ac_cv_have_decl_SCHED_RR}" = "xyes" +then + AC_DEFINE([USE_SCHED_SETSCHEDULER],[1],[Enable round-robin scheduler using sched_setscheduler]) + AM_CFLAGS="${AM_CFLAGS} -DUSE_SCHED_SETSCHEDULER=1" +fi + + + +# +# gcc visibility cflag checks +# +AC_ARG_ENABLE([visibility], + [AS_HELP_STRING([--disable-visibility], [Disable or enable API visibility support (default: use if available)])], + [enable_visibility="${enableval}"], + [enable_visibility="detect"] +) +HAVE_VISIBILITY="no" + +if test "x${enable_visibility}" != "xno" ; then + + case "${ax_cv_c_compiler_vendor}" in + gnu) + save_CFLAGS="${CFLAGS}" + CFLAGS="${CFLAGS} -fvisibility=hidden" + AC_MSG_CHECKING([whether the compiler supports -fvisibility=hidden]) + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [int foo __attribute__ ((visibility("default")));], + [;] + )], + + [AC_MSG_RESULT([yes]) + AM_CFLAGS="${AM_CFLAGS} -DKS_API_VISIBILITY=1 -fvisibility=hidden" + AC_DEFINE([HAVE_VISIBILITY], [1], [GCC visibility support available]) + HAVE_VISIBILITY="yes"], + + [AC_MSG_RESULT([no])] + ) + CFLAGS="${save_CFLAGS}" + ;; + + sun) + save_CFLAGS="${CFLAGS}" + CFLAGS="${CFLAGS} -xldscope=hidden" + AC_MSG_CHECKING([whether the compiler supports -xldscope=hidden]) + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [int foo __attribute__ ((visibility("default")));], + [;] + )], + + [AC_MSG_RESULT([yes]) + AM_CFLAGS="${AM_CFLAGS} -DKS_API_VISIBILITY=1 -xldscope=hidden" + AC_DEFINE([HAVE_VISIBILITY], [1], [SUNCC visibility 
support available]) + HAVE_VISIBILITY="yes"], + + [AC_MSG_RESULT([no])] + ) + CFLAGS="${save_CFLAGS}" + ;; + + *) + if test "x${enable_visibility}" = "xyes" ; then + AC_MSG_ERROR([Non-GNU / SUN compilers are currently unsupported]) + else + AC_MSG_WARN([Non-GNU / SUN compilers are currently unsupported]) + fi + ;; + esac + + # + # visibility explicitly requested but not supported by this compiler => error + # + if test "x${enable_visibility}" = "xyes" -a "x${HAVE_VISIBILITY}" = "xno" ; then + AC_MSG_ERROR([API visibility not supported by this compiler]) + fi +fi + +AM_CFLAGS="${AM_CFLAGS} -Werror" +AC_SUBST(AM_CFLAGS) +AC_SUBST(AM_LDFLAGS) + +# Checks for header files. +AC_HEADER_DIRENT +AC_HEADER_STDC + +# Checks for typedefs, structures, and compiler characteristics. +AC_C_CONST +AC_C_INLINE +AC_TYPE_SIZE_T +AC_HEADER_TIME +AC_STRUCT_TM + +# Checks for library functions. +AC_PROG_GCC_TRADITIONAL +AC_FUNC_MALLOC +AC_TYPE_SIGNAL +AC_FUNC_STRFTIME + +AC_CHECK_LIB(pthread, pthread_setschedparam, [ +AC_DEFINE(HAVE_PTHREAD_SETSCHEDPARAM, 1, [Define if you have pthread_setschedparam()]) +AM_CFLAGS="${AM_CFLAGS} -DHAVE_PTHREAD_SETSCHEDPARAM=1" +]) + +AC_C_BIGENDIAN(AC_DEFINE([__BYTE_ORDER],__BIG_ENDIAN,[Big Endian]),AC_DEFINE([__BYTE_ORDER],__LITTLE_ENDIAN,[Little Endian])) +AC_DEFINE([__LITTLE_ENDIAN],1234,[for the places where it is not defined]) +AC_DEFINE([__BIG_ENDIAN],4321,[for the places where it is not defined]) + +path_remove () { + echo "$1" | tr ':' '\n' | grep -Fxv "$2" | tr '\n' ':' | sed 's/:$//' +} +path_push_unique () { + x="$(eval echo \$$1)" + x="$(path_remove "$x" "$2")" + if test -z "$x"; then + eval export $1="$2" + else + eval export $1="$2:$x" + fi +} + +case $host in + *-darwin*) + path_push_unique PKG_CONFIG_PATH /usr/local/opt/openssl/lib/pkgconfig + ;; +esac + +SAC_OPENSSL + +if test x$HAVE_OPENSSL = x1; then + openssl_CFLAGS="$openssl_CFLAGS -DHAVE_OPENSSL"; + AM_CFLAGS="${AM_CFLAGS} ${openssl_CFLAGS} -DHAVE_OPENSSL" + AM_LDFLAGS="${AM_LDFLAGS} ${openssl_LIBS}" +else + AC_MSG_ERROR([OpenSSL and associated developement headers required]) +fi + + +# Enable clang address sanitizer bit build +AC_ARG_ENABLE(address_sanitizer, + [AC_HELP_STRING([--enable-address-sanitizer],[build with address sanitizer])], + [enable_address_sanitizer="$enable_address_sanitizer"], + [enable_address_sanitizer="no"]) + +if test "${enable_address_sanitizer}" = "yes"; then + if test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then + AM_CFLAGS="${AM_CFLAGS} -fsanitize=address -fno-omit-frame-pointer" + AM_CXXFLAGS="${AM_CXXFLAGS} -fsanitize=address -fno-omit-frame-pointer" + AM_LDFLAGS="${AM_LDFLAGS} -fsanitize=address" + fi +fi + +PKG_CHECK_MODULES([SODIUM], [libsodium >= 1.0.0],[AC_MSG_RESULT([yes])],[AC_MSG_ERROR([libsodium is required])]) +PKG_CHECK_MODULES([UUID], [uuid >= 1.0.0],[AC_MSG_RESULT([yes])],[AC_MSG_ERROR([libuuid is required])]) + +AM_CFLAGS="${AM_CFLAGS} -Werror ${SODIUM_CFLAGS} ${UUID_CFLAGS}" +AM_LDFLAGS="${AM_LDFLAGS} ${SODIUM_LIBS} ${UUID_LIBS}" + +AC_CONFIG_FILES([Makefile + test/Makefile + libks.pc +]) + +AC_OUTPUT diff --git a/libs/libks/crypt/aes.h b/libs/libks/crypt/aes.h new file mode 100644 index 0000000000..e05d58b826 --- /dev/null +++ b/libs/libks/crypt/aes.h @@ -0,0 +1,198 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. 
+ +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 + + This file contains the definitions required to use AES in C. See aesopt.h + for optimisation details. +*/ + +#ifndef _AES_H +#define _AES_H + +#include + +/* This include is used to find 8 & 32 bit unsigned integer types */ +#include "brg_types.h" + +#if defined(__cplusplus) +extern "C" +{ +#endif + +#define AES_128 /* if a fast 128 bit key scheduler is needed */ +#define AES_192 /* if a fast 192 bit key scheduler is needed */ +#define AES_256 /* if a fast 256 bit key scheduler is needed */ +#define AES_VAR /* if variable key size scheduler is needed */ +#define AES_MODES /* if support is needed for modes */ + +/* The following must also be set in assembler files if being used */ + +#define AES_ENCRYPT /* if support for encryption is needed */ +#define AES_DECRYPT /* if support for decryption is needed */ +#define AES_REV_DKS /* define to reverse decryption key schedule */ + +#define AES_BLOCK_SIZE 16 /* the AES block size in bytes */ +#define N_COLS 4 /* the number of columns in the state */ + +/* The key schedule length is 11, 13 or 15 16-byte blocks for 128, */ +/* 192 or 256-bit keys respectively. That is 176, 208 or 240 bytes */ +/* or 44, 52 or 60 32-bit words. */ + +#if defined( AES_VAR ) || defined( AES_256 ) +#define KS_LENGTH 60 +#elif defined( AES_192 ) +#define KS_LENGTH 52 +#else +#define KS_LENGTH 44 +#endif + +#define AES_RETURN INT_RETURN + +/* the character array 'inf' in the following structures is used */ +/* to hold AES context information. This AES code uses cx->inf.b[0] */ +/* to hold the number of rounds multiplied by 16. 
The other three */ +/* elements can be used by code that implements additional modes */ + +typedef union +{ uint_32t l; + uint_8t b[4]; +} aes_inf; + +typedef struct +{ uint_32t ks[KS_LENGTH]; + aes_inf inf; +} aes_encrypt_ctx; + +typedef struct +{ uint_32t ks[KS_LENGTH]; + aes_inf inf; +} aes_decrypt_ctx; + +/* This routine must be called before first use if non-static */ +/* tables are being used */ + +AES_RETURN ks_aes_init(void); + +/* Key lengths in the range 16 <= key_len <= 32 are given in bytes, */ +/* those in the range 128 <= key_len <= 256 are given in bits */ + +#if defined( AES_ENCRYPT ) + +#if defined( AES_128 ) || defined( AES_VAR) +AES_RETURN aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]); +#endif + +#if defined( AES_192 ) || defined( AES_VAR) +AES_RETURN aes_encrypt_key192(const unsigned char *key, aes_encrypt_ctx cx[1]); +#endif + +#if defined( AES_256 ) || defined( AES_VAR) +AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]); +#endif + +#if defined( AES_VAR ) +AES_RETURN aes_encrypt_key(const unsigned char *key, int key_len, aes_encrypt_ctx cx[1]); +#endif + +AES_RETURN aes_encrypt(const unsigned char *in, unsigned char *out, const aes_encrypt_ctx cx[1]); + +#endif + +#if defined( AES_DECRYPT ) + +#if defined( AES_128 ) || defined( AES_VAR) +AES_RETURN aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]); +#endif + +#if defined( AES_192 ) || defined( AES_VAR) +AES_RETURN aes_decrypt_key192(const unsigned char *key, aes_decrypt_ctx cx[1]); +#endif + +#if defined( AES_256 ) || defined( AES_VAR) +AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]); +#endif + +#if defined( AES_VAR ) +AES_RETURN aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1]); +#endif + +AES_RETURN aes_decrypt(const unsigned char *in, unsigned char *out, const aes_decrypt_ctx cx[1]); + +#endif + +#if defined( AES_MODES ) + +/* Multiple calls to the following subroutines for multiple block */ +/* ECB, CBC, CFB, OFB and CTR mode encryption can be used to handle */ +/* long messages incremantally provided that the context AND the iv */ +/* are preserved between all such calls. For the ECB and CBC modes */ +/* each individual call within a series of incremental calls must */ +/* process only full blocks (i.e. len must be a multiple of 16) but */ +/* the CFB, OFB and CTR mode calls can handle multiple incremental */ +/* calls of any length. Each mode is reset when a new AES key is */ +/* set but ECB and CBC operations can be reset without setting a */ +/* new key by setting a new IV value. To reset CFB, OFB and CTR */ +/* without setting the key, aes_mode_reset() must be called and the */ +/* IV must be set. NOTE: All these calls update the IV on exit so */ +/* this has to be reset if a new operation with the same IV as the */ +/* previous one is required (or decryption follows encryption with */ +/* the same IV array). 
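+
+   A minimal usage sketch of the incremental interface follows (key, msg,
+   buf, len1 and len2 are placeholders, iv holds the initialisation vector
+   before the first call, and for CBC each length must be a multiple of
+   AES_BLOCK_SIZE):
+
+       aes_encrypt_ctx cx[1];
+       unsigned char iv[AES_BLOCK_SIZE];
+       aes_encrypt_key128(key, cx);
+       aes_cbc_encrypt(msg,        buf,        len1, iv, cx);
+       aes_cbc_encrypt(msg + len1, buf + len1, len2, iv, cx);
+
+   The second call continues the same CBC stream because the updated iv
+   carries the chaining state from the first call.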
*/
+
+AES_RETURN aes_test_alignment_detection(unsigned int n);
+
+AES_RETURN aes_ecb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, const aes_encrypt_ctx cx[1]);
+
+AES_RETURN aes_ecb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, const aes_decrypt_ctx cx[1]);
+
+AES_RETURN aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, unsigned char *iv, const aes_encrypt_ctx cx[1]);
+
+AES_RETURN aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, unsigned char *iv, const aes_decrypt_ctx cx[1]);
+
+AES_RETURN aes_mode_reset(aes_encrypt_ctx cx[1]);
+
+AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, unsigned char *iv, aes_encrypt_ctx cx[1]);
+
+AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, unsigned char *iv, aes_encrypt_ctx cx[1]);
+
+#define aes_ofb_encrypt aes_ofb_crypt
+#define aes_ofb_decrypt aes_ofb_crypt
+
+AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, unsigned char *iv, aes_encrypt_ctx cx[1]);
+
+typedef void cbuf_inc(unsigned char *cbuf);
+
+#define aes_ctr_encrypt aes_ctr_crypt
+#define aes_ctr_decrypt aes_ctr_crypt
+
+AES_RETURN aes_ctr_crypt(const unsigned char *ibuf, unsigned char *obuf,
+ int len, unsigned char *cbuf, cbuf_inc ctr_inc, aes_encrypt_ctx cx[1]);
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/libs/libks/crypt/aes_modes.c b/libs/libks/crypt/aes_modes.c
new file mode 100644
index 0000000000..2ffa783bf3
--- /dev/null
+++ b/libs/libks/crypt/aes_modes.c
@@ -0,0 +1,946 @@
+/*
+---------------------------------------------------------------------------
+Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
+
+The redistribution and use of this software (with or without changes)
+is allowed without the payment of fees or royalties provided that:
+
+ source code distributions include the above copyright notice, this
+ list of conditions and the following disclaimer;
+
+ binary distributions include the above copyright notice, this list
+ of conditions and the following disclaimer in their documentation.
+
+This software is provided 'as is' with no explicit or implied warranties
+in respect of its operation, including, but not limited to, correctness
+and fitness for purpose.
+---------------------------------------------------------------------------
+Issue Date: 20/12/2007
+
+ These subroutines implement multiple block AES modes for ECB, CBC, CFB,
+ OFB and CTR encryption, The code provides support for the VIA Advanced
+ Cryptography Engine (ACE).
+
+ NOTE: In the following subroutines, the AES contexts (ctx) must be
+ 16 byte aligned if VIA ACE is being used
+*/
+
+#include <string.h>
+#include <assert.h>
+
+#include "aesopt.h"
+
+#if defined( AES_MODES )
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#if defined( _MSC_VER ) && ( _MSC_VER > 800 )
+#pragma intrinsic(memcpy)
+#endif
+
+#define BFR_BLOCKS 8
+
+/* These values are used to detect long word alignment in order to */
+/* speed up some buffer operations.
This facility may not work on */ +/* some machines so this define can be commented out if necessary */ + +#define FAST_BUFFER_OPERATIONS + +#define lp32(x) ((uint_32t*)(x)) + +#if defined( USE_VIA_ACE_IF_PRESENT ) + +#include "aes_via_ace.h" + +#pragma pack(16) + +aligned_array(unsigned long, enc_gen_table, 12, 16) = NEH_ENC_GEN_DATA; +aligned_array(unsigned long, enc_load_table, 12, 16) = NEH_ENC_LOAD_DATA; +aligned_array(unsigned long, enc_hybrid_table, 12, 16) = NEH_ENC_HYBRID_DATA; +aligned_array(unsigned long, dec_gen_table, 12, 16) = NEH_DEC_GEN_DATA; +aligned_array(unsigned long, dec_load_table, 12, 16) = NEH_DEC_LOAD_DATA; +aligned_array(unsigned long, dec_hybrid_table, 12, 16) = NEH_DEC_HYBRID_DATA; + +/* NOTE: These control word macros must only be used after */ +/* a key has been set up because they depend on key size */ +/* See the VIA ACE documentation for key type information */ +/* and aes_via_ace.h for non-default NEH_KEY_TYPE values */ + +#ifndef NEH_KEY_TYPE +# define NEH_KEY_TYPE NEH_HYBRID +#endif + +#if NEH_KEY_TYPE == NEH_LOAD +#define kd_adr(c) ((uint_8t*)(c)->ks) +#elif NEH_KEY_TYPE == NEH_GENERATE +#define kd_adr(c) ((uint_8t*)(c)->ks + (c)->inf.b[0]) +#elif NEH_KEY_TYPE == NEH_HYBRID +#define kd_adr(c) ((uint_8t*)(c)->ks + ((c)->inf.b[0] == 160 ? 160 : 0)) +#else +#error no key type defined for VIA ACE +#endif + +#else + +#define aligned_array(type, name, no, stride) type name[no] +#define aligned_auto(type, name, no, stride) type name[no] + +#endif + +#if defined( _MSC_VER ) && _MSC_VER > 1200 + +#define via_cwd(cwd, ty, dir, len) \ + unsigned long* cwd = (dir##_##ty##_table + ((len - 128) >> 4)) + +#else + +#define via_cwd(cwd, ty, dir, len) \ + aligned_auto(unsigned long, cwd, 4, 16); \ + cwd[1] = cwd[2] = cwd[3] = 0; \ + cwd[0] = neh_##dir##_##ty##_key(len) + +#endif + +/* test the code for detecting and setting pointer alignment */ + +AES_RETURN aes_test_alignment_detection(unsigned int n) /* 4 <= n <= 16 */ +{ uint_8t p[16]; + uint_32t i, count_eq = 0, count_neq = 0; + + if(n < 4 || n > 16) + return EXIT_FAILURE; + + for(i = 0; i < n; ++i) + { + uint_8t *qf = ALIGN_FLOOR(p + i, n), + *qh = ALIGN_CEIL(p + i, n); + + if(qh == qf) + ++count_eq; + else if(qh == qf + n) + ++count_neq; + else + return EXIT_FAILURE; + } + return (count_eq != 1 || count_neq != n - 1 ? EXIT_FAILURE : EXIT_SUCCESS); +} + +AES_RETURN aes_mode_reset(aes_encrypt_ctx ctx[1]) +{ + ctx->inf.b[2] = 0; + return EXIT_SUCCESS; +} + +AES_RETURN aes_ecb_encrypt(const unsigned char *ibuf, unsigned char *obuf, + int len, const aes_encrypt_ctx ctx[1]) +{ int nb = len >> 4; + + if(len & (AES_BLOCK_SIZE - 1)) + return EXIT_FAILURE; + +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { uint_8t *ksp = (uint_8t*)(ctx->ks); + via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 )) + { + via_ecb_op5(ksp, cwd, ibuf, obuf, nb); + } + else + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb); + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? 
buf : obuf); + + if(ip != ibuf) + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_ecb_op5(ksp, cwd, ip, op, m); + + if(op != obuf) + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + nb -= m; + } + } + + return EXIT_SUCCESS; + } + +#endif + +#if !defined( ASSUME_VIA_ACE_PRESENT ) + while(nb--) + { + if(aes_encrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } +#endif + return EXIT_SUCCESS; +} + +AES_RETURN aes_ecb_decrypt(const unsigned char *ibuf, unsigned char *obuf, + int len, const aes_decrypt_ctx ctx[1]) +{ int nb = len >> 4; + + if(len & (AES_BLOCK_SIZE - 1)) + return EXIT_FAILURE; + +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { uint_8t *ksp = kd_adr(ctx); + via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 )) + { + via_ecb_op5(ksp, cwd, ibuf, obuf, nb); + } + else + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb); + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf); + + if(ip != ibuf) + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_ecb_op5(ksp, cwd, ip, op, m); + + if(op != obuf) + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + nb -= m; + } + } + + return EXIT_SUCCESS; + } + +#endif + +#if !defined( ASSUME_VIA_ACE_PRESENT ) + while(nb--) + { + if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } +#endif + return EXIT_SUCCESS; +} + +AES_RETURN aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf, + int len, unsigned char *iv, const aes_encrypt_ctx ctx[1]) +{ int nb = len >> 4; + + if(len & (AES_BLOCK_SIZE - 1)) + return EXIT_FAILURE; + +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { uint_8t *ksp = (uint_8t*)(ctx->ks), *ivp = iv; + aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16); + via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */ + { + ivp = liv; + memcpy(liv, iv, AES_BLOCK_SIZE); + } + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ) && !ALIGN_OFFSET( iv, 16 )) + { + via_cbc_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp); + } + else + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb); + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? 
buf : obuf); + + if(ip != ibuf) + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_cbc_op7(ksp, cwd, ip, op, m, ivp, ivp); + + if(op != obuf) + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + nb -= m; + } + } + + if(iv != ivp) + memcpy(iv, ivp, AES_BLOCK_SIZE); + + return EXIT_SUCCESS; + } + +#endif + +#if !defined( ASSUME_VIA_ACE_PRESENT ) +# ifdef FAST_BUFFER_OPERATIONS + if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( iv, 4 )) + while(nb--) + { + lp32(iv)[0] ^= lp32(ibuf)[0]; + lp32(iv)[1] ^= lp32(ibuf)[1]; + lp32(iv)[2] ^= lp32(ibuf)[2]; + lp32(iv)[3] ^= lp32(ibuf)[3]; + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + memcpy(obuf, iv, AES_BLOCK_SIZE); + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } + else +# endif + while(nb--) + { + iv[ 0] ^= ibuf[ 0]; iv[ 1] ^= ibuf[ 1]; + iv[ 2] ^= ibuf[ 2]; iv[ 3] ^= ibuf[ 3]; + iv[ 4] ^= ibuf[ 4]; iv[ 5] ^= ibuf[ 5]; + iv[ 6] ^= ibuf[ 6]; iv[ 7] ^= ibuf[ 7]; + iv[ 8] ^= ibuf[ 8]; iv[ 9] ^= ibuf[ 9]; + iv[10] ^= ibuf[10]; iv[11] ^= ibuf[11]; + iv[12] ^= ibuf[12]; iv[13] ^= ibuf[13]; + iv[14] ^= ibuf[14]; iv[15] ^= ibuf[15]; + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + memcpy(obuf, iv, AES_BLOCK_SIZE); + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } +#endif + return EXIT_SUCCESS; +} + +AES_RETURN aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf, + int len, unsigned char *iv, const aes_decrypt_ctx ctx[1]) +{ unsigned char tmp[AES_BLOCK_SIZE]; + int nb = len >> 4; + + if(len & (AES_BLOCK_SIZE - 1)) + return EXIT_FAILURE; + +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { uint_8t *ksp = kd_adr(ctx), *ivp = iv; + aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16); + via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */ + { + ivp = liv; + memcpy(liv, iv, AES_BLOCK_SIZE); + } + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ) && !ALIGN_OFFSET( iv, 16 )) + { + via_cbc_op6(ksp, cwd, ibuf, obuf, nb, ivp); + } + else + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb); + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? 
buf : obuf); + + if(ip != ibuf) + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_cbc_op6(ksp, cwd, ip, op, m, ivp); + + if(op != obuf) + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + nb -= m; + } + } + + if(iv != ivp) + memcpy(iv, ivp, AES_BLOCK_SIZE); + + return EXIT_SUCCESS; + } +#endif + +#if !defined( ASSUME_VIA_ACE_PRESENT ) +# ifdef FAST_BUFFER_OPERATIONS + if(!ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( iv, 4 )) + while(nb--) + { + memcpy(tmp, ibuf, AES_BLOCK_SIZE); + if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + lp32(obuf)[0] ^= lp32(iv)[0]; + lp32(obuf)[1] ^= lp32(iv)[1]; + lp32(obuf)[2] ^= lp32(iv)[2]; + lp32(obuf)[3] ^= lp32(iv)[3]; + memcpy(iv, tmp, AES_BLOCK_SIZE); + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } + else +# endif + while(nb--) + { + memcpy(tmp, ibuf, AES_BLOCK_SIZE); + if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + obuf[ 0] ^= iv[ 0]; obuf[ 1] ^= iv[ 1]; + obuf[ 2] ^= iv[ 2]; obuf[ 3] ^= iv[ 3]; + obuf[ 4] ^= iv[ 4]; obuf[ 5] ^= iv[ 5]; + obuf[ 6] ^= iv[ 6]; obuf[ 7] ^= iv[ 7]; + obuf[ 8] ^= iv[ 8]; obuf[ 9] ^= iv[ 9]; + obuf[10] ^= iv[10]; obuf[11] ^= iv[11]; + obuf[12] ^= iv[12]; obuf[13] ^= iv[13]; + obuf[14] ^= iv[14]; obuf[15] ^= iv[15]; + memcpy(iv, tmp, AES_BLOCK_SIZE); + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } +#endif + return EXIT_SUCCESS; +} + +AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf, + int len, unsigned char *iv, aes_encrypt_ctx ctx[1]) +{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb; + + if(b_pos) /* complete any partial block */ + { + while(b_pos < AES_BLOCK_SIZE && cnt < len) + { + *obuf++ = (iv[b_pos++] ^= *ibuf++); + cnt++; + } + + b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos); + } + + if((nb = (len - cnt) >> 4) != 0) /* process whole blocks */ + { +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { int m; + uint_8t *ksp = (uint_8t*)(ctx->ks), *ivp = iv; + aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16); + via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */ + { + ivp = liv; + memcpy(liv, iv, AES_BLOCK_SIZE); + } + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 )) + { + via_cfb_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp); + ibuf += nb * AES_BLOCK_SIZE; + obuf += nb * AES_BLOCK_SIZE; + cnt += nb * AES_BLOCK_SIZE; + } + else /* input, output or both are unaligned */ + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m; + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? 
buf : obuf); + + if(ip != ibuf) + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_cfb_op7(ksp, cwd, ip, op, m, ivp, ivp); + + if(op != obuf) + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + cnt += m * AES_BLOCK_SIZE; + } + } + + if(ivp != iv) + memcpy(iv, ivp, AES_BLOCK_SIZE); + } +#else +# ifdef FAST_BUFFER_OPERATIONS + if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( iv, 4 )) + while(cnt + AES_BLOCK_SIZE <= len) + { + assert(b_pos == 0); + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + lp32(obuf)[0] = lp32(iv)[0] ^= lp32(ibuf)[0]; + lp32(obuf)[1] = lp32(iv)[1] ^= lp32(ibuf)[1]; + lp32(obuf)[2] = lp32(iv)[2] ^= lp32(ibuf)[2]; + lp32(obuf)[3] = lp32(iv)[3] ^= lp32(ibuf)[3]; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + cnt += AES_BLOCK_SIZE; + } + else +# endif + while(cnt + AES_BLOCK_SIZE <= len) + { + assert(b_pos == 0); + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + obuf[ 0] = iv[ 0] ^= ibuf[ 0]; obuf[ 1] = iv[ 1] ^= ibuf[ 1]; + obuf[ 2] = iv[ 2] ^= ibuf[ 2]; obuf[ 3] = iv[ 3] ^= ibuf[ 3]; + obuf[ 4] = iv[ 4] ^= ibuf[ 4]; obuf[ 5] = iv[ 5] ^= ibuf[ 5]; + obuf[ 6] = iv[ 6] ^= ibuf[ 6]; obuf[ 7] = iv[ 7] ^= ibuf[ 7]; + obuf[ 8] = iv[ 8] ^= ibuf[ 8]; obuf[ 9] = iv[ 9] ^= ibuf[ 9]; + obuf[10] = iv[10] ^= ibuf[10]; obuf[11] = iv[11] ^= ibuf[11]; + obuf[12] = iv[12] ^= ibuf[12]; obuf[13] = iv[13] ^= ibuf[13]; + obuf[14] = iv[14] ^= ibuf[14]; obuf[15] = iv[15] ^= ibuf[15]; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + cnt += AES_BLOCK_SIZE; + } +#endif + } + + while(cnt < len) + { + if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + + while(cnt < len && b_pos < AES_BLOCK_SIZE) + { + *obuf++ = (iv[b_pos++] ^= *ibuf++); + cnt++; + } + + b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos); + } + + ctx->inf.b[2] = (uint_8t)b_pos; + return EXIT_SUCCESS; +} + +AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf, + int len, unsigned char *iv, aes_encrypt_ctx ctx[1]) +{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb; + + if(b_pos) /* complete any partial block */ + { uint_8t t; + + while(b_pos < AES_BLOCK_SIZE && cnt < len) + { + t = *ibuf++; + *obuf++ = t ^ iv[b_pos]; + iv[b_pos++] = t; + cnt++; + } + + b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos); + } + + if((nb = (len - cnt) >> 4) != 0) /* process whole blocks */ + { +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { int m; + uint_8t *ksp = (uint_8t*)(ctx->ks), *ivp = iv; + aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16); + via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */ + { + ivp = liv; + memcpy(liv, iv, AES_BLOCK_SIZE); + } + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 )) + { + via_cfb_op6(ksp, cwd, ibuf, obuf, nb, ivp); + ibuf += nb * AES_BLOCK_SIZE; + obuf += nb * AES_BLOCK_SIZE; + cnt += nb * AES_BLOCK_SIZE; + } + else /* input, output or both are unaligned */ + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m; + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? 
buf : obuf); + + if(ip != ibuf) /* input buffer is not aligned */ + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_cfb_op6(ksp, cwd, ip, op, m, ivp); + + if(op != obuf) /* output buffer is not aligned */ + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + cnt += m * AES_BLOCK_SIZE; + } + } + + if(ivp != iv) + memcpy(iv, ivp, AES_BLOCK_SIZE); + } +#else +# ifdef FAST_BUFFER_OPERATIONS + if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) &&!ALIGN_OFFSET( iv, 4 )) + while(cnt + AES_BLOCK_SIZE <= len) + { uint_32t t; + + assert(b_pos == 0); + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + t = lp32(ibuf)[0], lp32(obuf)[0] = t ^ lp32(iv)[0], lp32(iv)[0] = t; + t = lp32(ibuf)[1], lp32(obuf)[1] = t ^ lp32(iv)[1], lp32(iv)[1] = t; + t = lp32(ibuf)[2], lp32(obuf)[2] = t ^ lp32(iv)[2], lp32(iv)[2] = t; + t = lp32(ibuf)[3], lp32(obuf)[3] = t ^ lp32(iv)[3], lp32(iv)[3] = t; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + cnt += AES_BLOCK_SIZE; + } + else +# endif + while(cnt + AES_BLOCK_SIZE <= len) + { uint_8t t; + + assert(b_pos == 0); + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + t = ibuf[ 0], obuf[ 0] = t ^ iv[ 0], iv[ 0] = t; + t = ibuf[ 1], obuf[ 1] = t ^ iv[ 1], iv[ 1] = t; + t = ibuf[ 2], obuf[ 2] = t ^ iv[ 2], iv[ 2] = t; + t = ibuf[ 3], obuf[ 3] = t ^ iv[ 3], iv[ 3] = t; + t = ibuf[ 4], obuf[ 4] = t ^ iv[ 4], iv[ 4] = t; + t = ibuf[ 5], obuf[ 5] = t ^ iv[ 5], iv[ 5] = t; + t = ibuf[ 6], obuf[ 6] = t ^ iv[ 6], iv[ 6] = t; + t = ibuf[ 7], obuf[ 7] = t ^ iv[ 7], iv[ 7] = t; + t = ibuf[ 8], obuf[ 8] = t ^ iv[ 8], iv[ 8] = t; + t = ibuf[ 9], obuf[ 9] = t ^ iv[ 9], iv[ 9] = t; + t = ibuf[10], obuf[10] = t ^ iv[10], iv[10] = t; + t = ibuf[11], obuf[11] = t ^ iv[11], iv[11] = t; + t = ibuf[12], obuf[12] = t ^ iv[12], iv[12] = t; + t = ibuf[13], obuf[13] = t ^ iv[13], iv[13] = t; + t = ibuf[14], obuf[14] = t ^ iv[14], iv[14] = t; + t = ibuf[15], obuf[15] = t ^ iv[15], iv[15] = t; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + cnt += AES_BLOCK_SIZE; + } +#endif + } + + while(cnt < len) + { uint_8t t; + + if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + + while(cnt < len && b_pos < AES_BLOCK_SIZE) + { + t = *ibuf++; + *obuf++ = t ^ iv[b_pos]; + iv[b_pos++] = t; + cnt++; + } + + b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos); + } + + ctx->inf.b[2] = (uint_8t)b_pos; + return EXIT_SUCCESS; +} + +AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf, + int len, unsigned char *iv, aes_encrypt_ctx ctx[1]) +{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb; + + if(b_pos) /* complete any partial block */ + { + while(b_pos < AES_BLOCK_SIZE && cnt < len) + { + *obuf++ = iv[b_pos++] ^ *ibuf++; + cnt++; + } + + b_pos = (b_pos == AES_BLOCK_SIZE ? 
0 : b_pos); + } + + if((nb = (len - cnt) >> 4) != 0) /* process whole blocks */ + { +#if defined( USE_VIA_ACE_IF_PRESENT ) + + if(ctx->inf.b[1] == 0xff) + { int m; + uint_8t *ksp = (uint_8t*)(ctx->ks), *ivp = iv; + aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16); + via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192); + + if(ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; + + if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */ + { + ivp = liv; + memcpy(liv, iv, AES_BLOCK_SIZE); + } + + if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 )) + { + via_ofb_op6(ksp, cwd, ibuf, obuf, nb, ivp); + ibuf += nb * AES_BLOCK_SIZE; + obuf += nb * AES_BLOCK_SIZE; + cnt += nb * AES_BLOCK_SIZE; + } + else /* input, output or both are unaligned */ + { aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16); + uint_8t *ip, *op; + + while(nb) + { + m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m; + + ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf); + op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf); + + if(ip != ibuf) + memcpy(buf, ibuf, m * AES_BLOCK_SIZE); + + via_ofb_op6(ksp, cwd, ip, op, m, ivp); + + if(op != obuf) + memcpy(obuf, buf, m * AES_BLOCK_SIZE); + + ibuf += m * AES_BLOCK_SIZE; + obuf += m * AES_BLOCK_SIZE; + cnt += m * AES_BLOCK_SIZE; + } + } + + if(ivp != iv) + memcpy(iv, ivp, AES_BLOCK_SIZE); + } +#else +# ifdef FAST_BUFFER_OPERATIONS + if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( iv, 4 )) + while(cnt + AES_BLOCK_SIZE <= len) + { + assert(b_pos == 0); + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + lp32(obuf)[0] = lp32(iv)[0] ^ lp32(ibuf)[0]; + lp32(obuf)[1] = lp32(iv)[1] ^ lp32(ibuf)[1]; + lp32(obuf)[2] = lp32(iv)[2] ^ lp32(ibuf)[2]; + lp32(obuf)[3] = lp32(iv)[3] ^ lp32(ibuf)[3]; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + cnt += AES_BLOCK_SIZE; + } + else +# endif + while(cnt + AES_BLOCK_SIZE <= len) + { + assert(b_pos == 0); + if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + obuf[ 0] = iv[ 0] ^ ibuf[ 0]; obuf[ 1] = iv[ 1] ^ ibuf[ 1]; + obuf[ 2] = iv[ 2] ^ ibuf[ 2]; obuf[ 3] = iv[ 3] ^ ibuf[ 3]; + obuf[ 4] = iv[ 4] ^ ibuf[ 4]; obuf[ 5] = iv[ 5] ^ ibuf[ 5]; + obuf[ 6] = iv[ 6] ^ ibuf[ 6]; obuf[ 7] = iv[ 7] ^ ibuf[ 7]; + obuf[ 8] = iv[ 8] ^ ibuf[ 8]; obuf[ 9] = iv[ 9] ^ ibuf[ 9]; + obuf[10] = iv[10] ^ ibuf[10]; obuf[11] = iv[11] ^ ibuf[11]; + obuf[12] = iv[12] ^ ibuf[12]; obuf[13] = iv[13] ^ ibuf[13]; + obuf[14] = iv[14] ^ ibuf[14]; obuf[15] = iv[15] ^ ibuf[15]; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + cnt += AES_BLOCK_SIZE; + } +#endif + } + + while(cnt < len) + { + if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + + while(cnt < len && b_pos < AES_BLOCK_SIZE) + { + *obuf++ = iv[b_pos++] ^ *ibuf++; + cnt++; + } + + b_pos = (b_pos == AES_BLOCK_SIZE ? 
0 : b_pos); + } + + ctx->inf.b[2] = (uint_8t)b_pos; + return EXIT_SUCCESS; +} + +#define BFR_LENGTH (BFR_BLOCKS * AES_BLOCK_SIZE) + +AES_RETURN aes_ctr_crypt(const unsigned char *ibuf, unsigned char *obuf, + int len, unsigned char *cbuf, cbuf_inc ctr_inc, aes_encrypt_ctx ctx[1]) +{ unsigned char *ip; + int i, blen, b_pos = (int)(ctx->inf.b[2]); + +#if defined( USE_VIA_ACE_IF_PRESENT ) + aligned_auto(uint_8t, buf, BFR_LENGTH, 16); + if(ctx->inf.b[1] == 0xff && ALIGN_OFFSET( ctx, 16 )) + return EXIT_FAILURE; +#else + uint_8t buf[BFR_LENGTH]; +#endif + + if(b_pos) + { + memcpy(buf, cbuf, AES_BLOCK_SIZE); + if(aes_ecb_encrypt(buf, buf, AES_BLOCK_SIZE, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + + while(b_pos < AES_BLOCK_SIZE && len) + { + *obuf++ = *ibuf++ ^ buf[b_pos++]; + --len; + } + + if(len) + ctr_inc(cbuf), b_pos = 0; + } + + while(len) + { + blen = (len > BFR_LENGTH ? BFR_LENGTH : len), len -= blen; + + for(i = 0, ip = buf; i < (blen >> 4); ++i) + { + memcpy(ip, cbuf, AES_BLOCK_SIZE); + ctr_inc(cbuf); + ip += AES_BLOCK_SIZE; + } + + if(blen & (AES_BLOCK_SIZE - 1)) + memcpy(ip, cbuf, AES_BLOCK_SIZE), i++; + +#if defined( USE_VIA_ACE_IF_PRESENT ) + if(ctx->inf.b[1] == 0xff) + { + via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192); + via_ecb_op5((ctx->ks), cwd, buf, buf, i); + } + else +#endif + if(aes_ecb_encrypt(buf, buf, i * AES_BLOCK_SIZE, ctx) != EXIT_SUCCESS) + return EXIT_FAILURE; + + i = 0; ip = buf; +# ifdef FAST_BUFFER_OPERATIONS + if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( ip, 4 )) + while(i + AES_BLOCK_SIZE <= blen) + { + lp32(obuf)[0] = lp32(ibuf)[0] ^ lp32(ip)[0]; + lp32(obuf)[1] = lp32(ibuf)[1] ^ lp32(ip)[1]; + lp32(obuf)[2] = lp32(ibuf)[2] ^ lp32(ip)[2]; + lp32(obuf)[3] = lp32(ibuf)[3] ^ lp32(ip)[3]; + i += AES_BLOCK_SIZE; + ip += AES_BLOCK_SIZE; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } + else +#endif + while(i + AES_BLOCK_SIZE <= blen) + { + obuf[ 0] = ibuf[ 0] ^ ip[ 0]; obuf[ 1] = ibuf[ 1] ^ ip[ 1]; + obuf[ 2] = ibuf[ 2] ^ ip[ 2]; obuf[ 3] = ibuf[ 3] ^ ip[ 3]; + obuf[ 4] = ibuf[ 4] ^ ip[ 4]; obuf[ 5] = ibuf[ 5] ^ ip[ 5]; + obuf[ 6] = ibuf[ 6] ^ ip[ 6]; obuf[ 7] = ibuf[ 7] ^ ip[ 7]; + obuf[ 8] = ibuf[ 8] ^ ip[ 8]; obuf[ 9] = ibuf[ 9] ^ ip[ 9]; + obuf[10] = ibuf[10] ^ ip[10]; obuf[11] = ibuf[11] ^ ip[11]; + obuf[12] = ibuf[12] ^ ip[12]; obuf[13] = ibuf[13] ^ ip[13]; + obuf[14] = ibuf[14] ^ ip[14]; obuf[15] = ibuf[15] ^ ip[15]; + i += AES_BLOCK_SIZE; + ip += AES_BLOCK_SIZE; + ibuf += AES_BLOCK_SIZE; + obuf += AES_BLOCK_SIZE; + } + + while(i++ < blen) + *obuf++ = *ibuf++ ^ ip[b_pos++]; + } + + ctx->inf.b[2] = (uint_8t)b_pos; + return EXIT_SUCCESS; +} + +#if defined(__cplusplus) +} +#endif +#endif diff --git a/libs/libks/crypt/aescpp.h b/libs/libks/crypt/aescpp.h new file mode 100644 index 0000000000..e283cfa550 --- /dev/null +++ b/libs/libks/crypt/aescpp.h @@ -0,0 +1,141 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. 
+ +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 + + This file contains the definitions required to use AES (Rijndael) in C++. +*/ + +#ifndef _AESCPP_H +#define _AESCPP_H + +#include "aes.h" + +#if defined( AES_ENCRYPT ) + +class AESencrypt +{ +public: + aes_encrypt_ctx cx[1]; + AESencrypt(void) { aes_init_zrtp(); }; +#if defined(AES_128) + AESencrypt(const unsigned char key[]) + { aes_encrypt_key128(key, cx); } + AES_RETURN key128(const unsigned char key[]) + { return aes_encrypt_key128(key, cx); } +#endif +#if defined(AES_192) + AES_RETURN key192(const unsigned char key[]) + { return aes_encrypt_key192(key, cx); } +#endif +#if defined(AES_256) + AES_RETURN key256(const unsigned char key[]) + { return aes_encrypt_key256(key, cx); } +#endif +#if defined(AES_VAR) + AES_RETURN key(const unsigned char key[], int key_len) + { return aes_encrypt_key(key, key_len, cx); } +#endif + AES_RETURN encrypt(const unsigned char in[], unsigned char out[]) const + { return aes_encrypt(in, out, cx); } +#ifndef AES_MODES + AES_RETURN ecb_encrypt(const unsigned char in[], unsigned char out[], int nb) const + { while(nb--) + { aes_encrypt(in, out, cx), in += AES_BLOCK_SIZE, out += AES_BLOCK_SIZE; } + } +#endif +#ifdef AES_MODES + AES_RETURN mode_reset(void) { return aes_mode_reset(cx); } + + AES_RETURN ecb_encrypt(const unsigned char in[], unsigned char out[], int nb) const + { return aes_ecb_encrypt(in, out, nb, cx); } + + AES_RETURN cbc_encrypt(const unsigned char in[], unsigned char out[], int nb, + unsigned char iv[]) const + { return aes_cbc_encrypt(in, out, nb, iv, cx); } + + AES_RETURN cfb_encrypt(const unsigned char in[], unsigned char out[], int nb, + unsigned char iv[]) + { return aes_cfb_encrypt(in, out, nb, iv, cx); } + + AES_RETURN cfb_decrypt(const unsigned char in[], unsigned char out[], int nb, + unsigned char iv[]) + { return aes_cfb_decrypt(in, out, nb, iv, cx); } + + AES_RETURN ofb_crypt(const unsigned char in[], unsigned char out[], int nb, + unsigned char iv[]) + { return aes_ofb_crypt(in, out, nb, iv, cx); } + + typedef void ctr_fn(unsigned char ctr[]); + + AES_RETURN ctr_crypt(const unsigned char in[], unsigned char out[], int nb, + unsigned char iv[], ctr_fn cf) + { return aes_ctr_crypt(in, out, nb, iv, cf, cx); } + +#endif + +}; + +#endif + +#if defined( AES_DECRYPT ) + +class AESdecrypt +{ +public: + aes_decrypt_ctx cx[1]; + AESdecrypt(void) { aes_init_zrtp(); }; +#if defined(AES_128) + AESdecrypt(const unsigned char key[]) + { aes_decrypt_key128(key, cx); } + AES_RETURN key128(const unsigned char key[]) + { return aes_decrypt_key128(key, cx); } +#endif +#if defined(AES_192) + AES_RETURN key192(const unsigned char key[]) + { return aes_decrypt_key192(key, cx); } +#endif +#if defined(AES_256) + AES_RETURN key256(const unsigned char key[]) + { return aes_decrypt_key256(key, cx); } +#endif +#if defined(AES_VAR) + AES_RETURN key(const unsigned char key[], int key_len) + { return aes_decrypt_key(key, key_len, cx); } +#endif + AES_RETURN decrypt(const unsigned char in[], unsigned char out[]) const + { return aes_decrypt(in, out, cx); } +#ifndef AES_MODES + AES_RETURN ecb_decrypt(const unsigned char in[], unsigned char out[], int nb) const + { while(nb--) + { aes_decrypt(in, out, cx), in += AES_BLOCK_SIZE, out += AES_BLOCK_SIZE; } + } +#endif +#ifdef AES_MODES + + 
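+    /* A minimal usage sketch for the mode members that follow (key, iv,
+       ctext, ptext and clen are placeholders; clen is a byte count that
+       must be a multiple of AES_BLOCK_SIZE for the ECB and CBC calls):
+
+           AESdecrypt d;
+           d.key256(key);
+           d.cbc_decrypt(ctext, ptext, clen, iv);
+
+       The iv buffer is updated on return, so a further cbc_decrypt() call
+       continues the same chained stream. */
+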
AES_RETURN ecb_decrypt(const unsigned char in[], unsigned char out[], int nb) const + { return aes_ecb_decrypt(in, out, nb, cx); } + + AES_RETURN cbc_decrypt(const unsigned char in[], unsigned char out[], int nb, + unsigned char iv[]) const + { return aes_cbc_decrypt(in, out, nb, iv, cx); } +#endif +}; + +#endif + +#endif diff --git a/libs/libks/crypt/aescrypt.c b/libs/libks/crypt/aescrypt.c new file mode 100644 index 0000000000..6095f41a68 --- /dev/null +++ b/libs/libks/crypt/aescrypt.c @@ -0,0 +1,294 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 +*/ + +#include "aesopt.h" +#include "aestab.h" + +#if defined(__cplusplus) +extern "C" +{ +#endif + +#define si(y,x,k,c) (s(y,c) = word_in(x, c) ^ (k)[c]) +#define so(y,x,c) word_out(y, c, s(x,c)) + +#if defined(ARRAYS) +#define locals(y,x) x[4],y[4] +#else +#define locals(y,x) x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3 +#endif + +#define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \ + s(y,2) = s(x,2); s(y,3) = s(x,3); +#define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3) +#define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3) +#define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3) + +#if ( FUNCS_IN_C & ENCRYPTION_IN_C ) + +/* Visual C++ .Net v7.1 provides the fastest encryption code when using + Pentium optimiation with small code but this is poor for decryption + so we need to control this with the following VC++ pragmas +*/ + +#if defined( _MSC_VER ) && !defined( _WIN64 ) +#pragma optimize( "s", on ) +#endif + +/* Given the column (c) of the output state variable, the following + macros give the input state variables which are needed in its + computation for each row (r) of the state. All the alternative + macros give the same end values but expand into different ways + of calculating these values. In particular the complex macro + used for dynamically variable block sizes is designed to expand + to a compile time constant whenever possible but will expand to + conditional clauses on some branches (I am grateful to Frank + Yellin for this construction) +*/ + +#define fwd_var(x,r,c)\ + ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\ + : r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\ + : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\ + : ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? 
s(x,1) : s(x,2))) + +#if defined(FT4_SET) +#undef dec_fmvars +#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c)) +#elif defined(FT1_SET) +#undef dec_fmvars +#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(f,n),fwd_var,rf1,c)) +#else +#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ fwd_mcol(no_table(x,t_use(s,box),fwd_var,rf1,c))) +#endif + +#if defined(FL4_SET) +#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,l),fwd_var,rf1,c)) +#elif defined(FL1_SET) +#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(f,l),fwd_var,rf1,c)) +#else +#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ no_table(x,t_use(s,box),fwd_var,rf1,c)) +#endif + +AES_RETURN aes_encrypt(const unsigned char *in, unsigned char *out, const aes_encrypt_ctx cx[1]) +{ uint_32t locals(b0, b1); + const uint_32t *kp; +#if defined( dec_fmvars ) + dec_fmvars; /* declare variables for fwd_mcol() if needed */ +#endif + + if( cx->inf.b[0] != 10 * 16 && cx->inf.b[0] != 12 * 16 && cx->inf.b[0] != 14 * 16 ) + return EXIT_FAILURE; + + kp = cx->ks; + state_in(b0, in, kp); + +#if (ENC_UNROLL == FULL) + + switch(cx->inf.b[0]) + { + case 14 * 16: + round(fwd_rnd, b1, b0, kp + 1 * N_COLS); + round(fwd_rnd, b0, b1, kp + 2 * N_COLS); + kp += 2 * N_COLS; + case 12 * 16: + round(fwd_rnd, b1, b0, kp + 1 * N_COLS); + round(fwd_rnd, b0, b1, kp + 2 * N_COLS); + kp += 2 * N_COLS; + case 10 * 16: + round(fwd_rnd, b1, b0, kp + 1 * N_COLS); + round(fwd_rnd, b0, b1, kp + 2 * N_COLS); + round(fwd_rnd, b1, b0, kp + 3 * N_COLS); + round(fwd_rnd, b0, b1, kp + 4 * N_COLS); + round(fwd_rnd, b1, b0, kp + 5 * N_COLS); + round(fwd_rnd, b0, b1, kp + 6 * N_COLS); + round(fwd_rnd, b1, b0, kp + 7 * N_COLS); + round(fwd_rnd, b0, b1, kp + 8 * N_COLS); + round(fwd_rnd, b1, b0, kp + 9 * N_COLS); + round(fwd_lrnd, b0, b1, kp +10 * N_COLS); + } + +#else + +#if (ENC_UNROLL == PARTIAL) + { uint_32t rnd; + for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd) + { + kp += N_COLS; + round(fwd_rnd, b1, b0, kp); + kp += N_COLS; + round(fwd_rnd, b0, b1, kp); + } + kp += N_COLS; + round(fwd_rnd, b1, b0, kp); +#else + { uint_32t rnd; + for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd) + { + kp += N_COLS; + round(fwd_rnd, b1, b0, kp); + l_copy(b0, b1); + } +#endif + kp += N_COLS; + round(fwd_lrnd, b0, b1, kp); + } +#endif + + state_out(out, b0); + return EXIT_SUCCESS; +} + +#endif + +#if ( FUNCS_IN_C & DECRYPTION_IN_C) + +/* Visual C++ .Net v7.1 provides the fastest encryption code when using + Pentium optimiation with small code but this is poor for decryption + so we need to control this with the following VC++ pragmas +*/ + +#if defined( _MSC_VER ) && !defined( _WIN64 ) +#pragma optimize( "t", on ) +#endif + +/* Given the column (c) of the output state variable, the following + macros give the input state variables which are needed in its + computation for each row (r) of the state. All the alternative + macros give the same end values but expand into different ways + of calculating these values. In particular the complex macro + used for dynamically variable block sizes is designed to expand + to a compile time constant whenever possible but will expand to + conditional clauses on some branches (I am grateful to Frank + Yellin for this construction) +*/ + +#define inv_var(x,r,c)\ + ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\ + : r == 1 ? ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2))\ + : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? 
s(x,0) : s(x,1))\ + : ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))) + +#if defined(IT4_SET) +#undef dec_imvars +#define inv_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,n),inv_var,rf1,c)) +#elif defined(IT1_SET) +#undef dec_imvars +#define inv_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(i,n),inv_var,rf1,c)) +#else +#define inv_rnd(y,x,k,c) (s(y,c) = inv_mcol((k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c))) +#endif + +#if defined(IL4_SET) +#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,l),inv_var,rf1,c)) +#elif defined(IL1_SET) +#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(i,l),inv_var,rf1,c)) +#else +#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c)) +#endif + +/* This code can work with the decryption key schedule in the */ +/* order that is used for encrytpion (where the 1st decryption */ +/* round key is at the high end ot the schedule) or with a key */ +/* schedule that has been reversed to put the 1st decryption */ +/* round key at the low end of the schedule in memory (when */ +/* AES_REV_DKS is defined) */ + +#ifdef AES_REV_DKS +#define key_ofs 0 +#define rnd_key(n) (kp + n * N_COLS) +#else +#define key_ofs 1 +#define rnd_key(n) (kp - n * N_COLS) +#endif + +AES_RETURN aes_decrypt(const unsigned char *in, unsigned char *out, const aes_decrypt_ctx cx[1]) +{ uint_32t locals(b0, b1); +#if defined( dec_imvars ) + dec_imvars; /* declare variables for inv_mcol() if needed */ +#endif + const uint_32t *kp; + + if( cx->inf.b[0] != 10 * 16 && cx->inf.b[0] != 12 * 16 && cx->inf.b[0] != 14 * 16 ) + return EXIT_FAILURE; + + kp = cx->ks + (key_ofs ? (cx->inf.b[0] >> 2) : 0); + state_in(b0, in, kp); + +#if (DEC_UNROLL == FULL) + + kp = cx->ks + (key_ofs ? 0 : (cx->inf.b[0] >> 2)); + switch(cx->inf.b[0]) + { + case 14 * 16: + round(inv_rnd, b1, b0, rnd_key(-13)); + round(inv_rnd, b0, b1, rnd_key(-12)); + case 12 * 16: + round(inv_rnd, b1, b0, rnd_key(-11)); + round(inv_rnd, b0, b1, rnd_key(-10)); + case 10 * 16: + round(inv_rnd, b1, b0, rnd_key(-9)); + round(inv_rnd, b0, b1, rnd_key(-8)); + round(inv_rnd, b1, b0, rnd_key(-7)); + round(inv_rnd, b0, b1, rnd_key(-6)); + round(inv_rnd, b1, b0, rnd_key(-5)); + round(inv_rnd, b0, b1, rnd_key(-4)); + round(inv_rnd, b1, b0, rnd_key(-3)); + round(inv_rnd, b0, b1, rnd_key(-2)); + round(inv_rnd, b1, b0, rnd_key(-1)); + round(inv_lrnd, b0, b1, rnd_key( 0)); + } + +#else + +#if (DEC_UNROLL == PARTIAL) + { uint_32t rnd; + for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd) + { + kp = rnd_key(1); + round(inv_rnd, b1, b0, kp); + kp = rnd_key(1); + round(inv_rnd, b0, b1, kp); + } + kp = rnd_key(1); + round(inv_rnd, b1, b0, kp); +#else + { uint_32t rnd; + for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd) + { + kp = rnd_key(1); + round(inv_rnd, b1, b0, kp); + l_copy(b0, b1); + } +#endif + kp = rnd_key(1); + round(inv_lrnd, b0, b1, kp); + } +#endif + + state_out(out, b0); + return EXIT_SUCCESS; +} + +#endif + +#if defined(__cplusplus) +} +#endif diff --git a/libs/libks/crypt/aeskey.c b/libs/libks/crypt/aeskey.c new file mode 100644 index 0000000000..14b435f8c1 --- /dev/null +++ b/libs/libks/crypt/aeskey.c @@ -0,0 +1,556 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. 
+ +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 +*/ + +#include "aesopt.h" +#include "aestab.h" + +/* +#ifdef USE_VIA_ACE_IF_PRESENT +# include "aes_via_ace.h" +#endif +*/ + +#if defined(__cplusplus) +extern "C" +{ +#endif + +/* Initialise the key schedule from the user supplied key. The key + length can be specified in bytes, with legal values of 16, 24 + and 32, or in bits, with legal values of 128, 192 and 256. These + values correspond with Nk values of 4, 6 and 8 respectively. + + The following macros implement a single cycle in the key + schedule generation process. The number of cycles needed + for each cx->n_col and nk value is: + + nk = 4 5 6 7 8 + ------------------------------ + cx->n_col = 4 10 9 8 7 7 + cx->n_col = 5 14 11 10 9 9 + cx->n_col = 6 19 15 12 11 11 + cx->n_col = 7 21 19 16 13 14 + cx->n_col = 8 29 23 19 17 14 +*/ + +#if defined( REDUCE_CODE_SIZE ) +# define ls_box ls_sub + uint_32t ls_sub(const uint_32t t, const uint_32t n); +# define inv_mcol im_sub + uint_32t im_sub(const uint_32t x); +# ifdef ENC_KS_UNROLL +# undef ENC_KS_UNROLL +# endif +# ifdef DEC_KS_UNROLL +# undef DEC_KS_UNROLL +# endif +#endif + +#if (FUNCS_IN_C & ENC_KEYING_IN_C) + +#if defined(AES_128) || defined( AES_VAR ) + +#define ke4(k,i) \ +{ k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; \ + k[4*(i)+5] = ss[1] ^= ss[0]; \ + k[4*(i)+6] = ss[2] ^= ss[1]; \ + k[4*(i)+7] = ss[3] ^= ss[2]; \ +} + +AES_RETURN aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]) +{ uint_32t ss[4]; + + cx->ks[0] = ss[0] = word_in(key, 0); + cx->ks[1] = ss[1] = word_in(key, 1); + cx->ks[2] = ss[2] = word_in(key, 2); + cx->ks[3] = ss[3] = word_in(key, 3); + +#ifdef ENC_KS_UNROLL + ke4(cx->ks, 0); ke4(cx->ks, 1); + ke4(cx->ks, 2); ke4(cx->ks, 3); + ke4(cx->ks, 4); ke4(cx->ks, 5); + ke4(cx->ks, 6); ke4(cx->ks, 7); + ke4(cx->ks, 8); +#else + { uint_32t i; + for(i = 0; i < 9; ++i) + ke4(cx->ks, i); + } +#endif + ke4(cx->ks, 9); + cx->inf.l = 0; + cx->inf.b[0] = 10 * 16; + +#ifdef USE_VIA_ACE_IF_PRESENT + if(VIA_ACE_AVAILABLE) + cx->inf.b[1] = 0xff; +#endif + return EXIT_SUCCESS; +} + +#endif + +#if defined(AES_192) || defined( AES_VAR ) + +#define kef6(k,i) \ +{ k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; \ + k[6*(i)+ 7] = ss[1] ^= ss[0]; \ + k[6*(i)+ 8] = ss[2] ^= ss[1]; \ + k[6*(i)+ 9] = ss[3] ^= ss[2]; \ +} + +#define ke6(k,i) \ +{ kef6(k,i); \ + k[6*(i)+10] = ss[4] ^= ss[3]; \ + k[6*(i)+11] = ss[5] ^= ss[4]; \ +} + +AES_RETURN aes_encrypt_key192(const unsigned char *key, aes_encrypt_ctx cx[1]) +{ uint_32t ss[6]; + + cx->ks[0] = ss[0] = word_in(key, 0); + cx->ks[1] = ss[1] = word_in(key, 1); + cx->ks[2] = ss[2] = word_in(key, 2); + cx->ks[3] = ss[3] = word_in(key, 3); + cx->ks[4] = ss[4] = word_in(key, 4); + cx->ks[5] = ss[5] = word_in(key, 5); + +#ifdef ENC_KS_UNROLL + ke6(cx->ks, 0); ke6(cx->ks, 1); + ke6(cx->ks, 2); ke6(cx->ks, 3); + ke6(cx->ks, 4); ke6(cx->ks, 5); + 
ke6(cx->ks, 6); +#else + { uint_32t i; + for(i = 0; i < 7; ++i) + ke6(cx->ks, i); + } +#endif + kef6(cx->ks, 7); + cx->inf.l = 0; + cx->inf.b[0] = 12 * 16; + +#ifdef USE_VIA_ACE_IF_PRESENT + if(VIA_ACE_AVAILABLE) + cx->inf.b[1] = 0xff; +#endif + return EXIT_SUCCESS; +} + +#endif + +#if defined(AES_256) || defined( AES_VAR ) + +#define kef8(k,i) \ +{ k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; \ + k[8*(i)+ 9] = ss[1] ^= ss[0]; \ + k[8*(i)+10] = ss[2] ^= ss[1]; \ + k[8*(i)+11] = ss[3] ^= ss[2]; \ +} + +#define ke8(k,i) \ +{ kef8(k,i); \ + k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \ + k[8*(i)+13] = ss[5] ^= ss[4]; \ + k[8*(i)+14] = ss[6] ^= ss[5]; \ + k[8*(i)+15] = ss[7] ^= ss[6]; \ +} + +AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]) +{ uint_32t ss[8]; + + cx->ks[0] = ss[0] = word_in(key, 0); + cx->ks[1] = ss[1] = word_in(key, 1); + cx->ks[2] = ss[2] = word_in(key, 2); + cx->ks[3] = ss[3] = word_in(key, 3); + cx->ks[4] = ss[4] = word_in(key, 4); + cx->ks[5] = ss[5] = word_in(key, 5); + cx->ks[6] = ss[6] = word_in(key, 6); + cx->ks[7] = ss[7] = word_in(key, 7); + +#ifdef ENC_KS_UNROLL + ke8(cx->ks, 0); ke8(cx->ks, 1); + ke8(cx->ks, 2); ke8(cx->ks, 3); + ke8(cx->ks, 4); ke8(cx->ks, 5); +#else + { uint_32t i; + for(i = 0; i < 6; ++i) + ke8(cx->ks, i); + } +#endif + kef8(cx->ks, 6); + cx->inf.l = 0; + cx->inf.b[0] = 14 * 16; + +#ifdef USE_VIA_ACE_IF_PRESENT + if(VIA_ACE_AVAILABLE) + cx->inf.b[1] = 0xff; +#endif + return EXIT_SUCCESS; +} + +#endif + +#if defined( AES_VAR ) + +AES_RETURN aes_encrypt_key(const unsigned char *key, int key_len, aes_encrypt_ctx cx[1]) +{ + switch(key_len) + { + case 16: case 128: return aes_encrypt_key128(key, cx); + case 24: case 192: return aes_encrypt_key192(key, cx); + case 32: case 256: return aes_encrypt_key256(key, cx); + default: return EXIT_FAILURE; + } +} + +#endif + +#endif + +#if (FUNCS_IN_C & DEC_KEYING_IN_C) + +/* this is used to store the decryption round keys */ +/* in forward or reverse order */ + +#ifdef AES_REV_DKS +#define v(n,i) ((n) - (i) + 2 * ((i) & 3)) +#else +#define v(n,i) (i) +#endif + +#if DEC_ROUND == NO_TABLES +#define ff(x) (x) +#else +#define ff(x) inv_mcol(x) +#if defined( dec_imvars ) +#define d_vars dec_imvars +#endif +#endif + +#if defined(AES_128) || defined( AES_VAR ) + +#define k4e(k,i) \ +{ k[v(40,(4*(i))+4)] = ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; \ + k[v(40,(4*(i))+5)] = ss[1] ^= ss[0]; \ + k[v(40,(4*(i))+6)] = ss[2] ^= ss[1]; \ + k[v(40,(4*(i))+7)] = ss[3] ^= ss[2]; \ +} + +#if 1 + +#define kdf4(k,i) \ +{ ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \ + ss[1] = ss[1] ^ ss[3]; \ + ss[2] = ss[2] ^ ss[3]; \ + ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; \ + ss[i % 4] ^= ss[4]; \ + ss[4] ^= k[v(40,(4*(i)))]; k[v(40,(4*(i))+4)] = ff(ss[4]); \ + ss[4] ^= k[v(40,(4*(i))+1)]; k[v(40,(4*(i))+5)] = ff(ss[4]); \ + ss[4] ^= k[v(40,(4*(i))+2)]; k[v(40,(4*(i))+6)] = ff(ss[4]); \ + ss[4] ^= k[v(40,(4*(i))+3)]; k[v(40,(4*(i))+7)] = ff(ss[4]); \ +} + +#define kd4(k,i) \ +{ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; \ + ss[i % 4] ^= ss[4]; ss[4] = ff(ss[4]); \ + k[v(40,(4*(i))+4)] = ss[4] ^= k[v(40,(4*(i)))]; \ + k[v(40,(4*(i))+5)] = ss[4] ^= k[v(40,(4*(i))+1)]; \ + k[v(40,(4*(i))+6)] = ss[4] ^= k[v(40,(4*(i))+2)]; \ + k[v(40,(4*(i))+7)] = ss[4] ^= k[v(40,(4*(i))+3)]; \ +} + +#define kdl4(k,i) \ +{ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; ss[i % 4] ^= ss[4]; \ + k[v(40,(4*(i))+4)] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \ + k[v(40,(4*(i))+5)] = ss[1] ^ ss[3]; \ + k[v(40,(4*(i))+6)] = 
ss[0]; \ + k[v(40,(4*(i))+7)] = ss[1]; \ +} + +#else + +#define kdf4(k,i) \ +{ ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[v(40,(4*(i))+ 4)] = ff(ss[0]); \ + ss[1] ^= ss[0]; k[v(40,(4*(i))+ 5)] = ff(ss[1]); \ + ss[2] ^= ss[1]; k[v(40,(4*(i))+ 6)] = ff(ss[2]); \ + ss[3] ^= ss[2]; k[v(40,(4*(i))+ 7)] = ff(ss[3]); \ +} + +#define kd4(k,i) \ +{ ss[4] = ls_box(ss[3],3) ^ t_use(r,c)[i]; \ + ss[0] ^= ss[4]; ss[4] = ff(ss[4]); k[v(40,(4*(i))+ 4)] = ss[4] ^= k[v(40,(4*(i)))]; \ + ss[1] ^= ss[0]; k[v(40,(4*(i))+ 5)] = ss[4] ^= k[v(40,(4*(i))+ 1)]; \ + ss[2] ^= ss[1]; k[v(40,(4*(i))+ 6)] = ss[4] ^= k[v(40,(4*(i))+ 2)]; \ + ss[3] ^= ss[2]; k[v(40,(4*(i))+ 7)] = ss[4] ^= k[v(40,(4*(i))+ 3)]; \ +} + +#define kdl4(k,i) \ +{ ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[v(40,(4*(i))+ 4)] = ss[0]; \ + ss[1] ^= ss[0]; k[v(40,(4*(i))+ 5)] = ss[1]; \ + ss[2] ^= ss[1]; k[v(40,(4*(i))+ 6)] = ss[2]; \ + ss[3] ^= ss[2]; k[v(40,(4*(i))+ 7)] = ss[3]; \ +} + +#endif + +AES_RETURN aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]) +{ uint_32t ss[5]; +#if defined( d_vars ) + d_vars; +#endif + cx->ks[v(40,(0))] = ss[0] = word_in(key, 0); + cx->ks[v(40,(1))] = ss[1] = word_in(key, 1); + cx->ks[v(40,(2))] = ss[2] = word_in(key, 2); + cx->ks[v(40,(3))] = ss[3] = word_in(key, 3); + +#ifdef DEC_KS_UNROLL + kdf4(cx->ks, 0); kd4(cx->ks, 1); + kd4(cx->ks, 2); kd4(cx->ks, 3); + kd4(cx->ks, 4); kd4(cx->ks, 5); + kd4(cx->ks, 6); kd4(cx->ks, 7); + kd4(cx->ks, 8); kdl4(cx->ks, 9); +#else + { uint_32t i; + for(i = 0; i < 10; ++i) + k4e(cx->ks, i); +#if !(DEC_ROUND == NO_TABLES) + for(i = N_COLS; i < 10 * N_COLS; ++i) + cx->ks[i] = inv_mcol(cx->ks[i]); +#endif + } +#endif + cx->inf.l = 0; + cx->inf.b[0] = 10 * 16; + +#ifdef USE_VIA_ACE_IF_PRESENT + if(VIA_ACE_AVAILABLE) + cx->inf.b[1] = 0xff; +#endif + return EXIT_SUCCESS; +} + +#endif + +#if defined(AES_192) || defined( AES_VAR ) + +#define k6ef(k,i) \ +{ k[v(48,(6*(i))+ 6)] = ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; \ + k[v(48,(6*(i))+ 7)] = ss[1] ^= ss[0]; \ + k[v(48,(6*(i))+ 8)] = ss[2] ^= ss[1]; \ + k[v(48,(6*(i))+ 9)] = ss[3] ^= ss[2]; \ +} + +#define k6e(k,i) \ +{ k6ef(k,i); \ + k[v(48,(6*(i))+10)] = ss[4] ^= ss[3]; \ + k[v(48,(6*(i))+11)] = ss[5] ^= ss[4]; \ +} + +#define kdf6(k,i) \ +{ ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[v(48,(6*(i))+ 6)] = ff(ss[0]); \ + ss[1] ^= ss[0]; k[v(48,(6*(i))+ 7)] = ff(ss[1]); \ + ss[2] ^= ss[1]; k[v(48,(6*(i))+ 8)] = ff(ss[2]); \ + ss[3] ^= ss[2]; k[v(48,(6*(i))+ 9)] = ff(ss[3]); \ + ss[4] ^= ss[3]; k[v(48,(6*(i))+10)] = ff(ss[4]); \ + ss[5] ^= ss[4]; k[v(48,(6*(i))+11)] = ff(ss[5]); \ +} + +#define kd6(k,i) \ +{ ss[6] = ls_box(ss[5],3) ^ t_use(r,c)[i]; \ + ss[0] ^= ss[6]; ss[6] = ff(ss[6]); k[v(48,(6*(i))+ 6)] = ss[6] ^= k[v(48,(6*(i)))]; \ + ss[1] ^= ss[0]; k[v(48,(6*(i))+ 7)] = ss[6] ^= k[v(48,(6*(i))+ 1)]; \ + ss[2] ^= ss[1]; k[v(48,(6*(i))+ 8)] = ss[6] ^= k[v(48,(6*(i))+ 2)]; \ + ss[3] ^= ss[2]; k[v(48,(6*(i))+ 9)] = ss[6] ^= k[v(48,(6*(i))+ 3)]; \ + ss[4] ^= ss[3]; k[v(48,(6*(i))+10)] = ss[6] ^= k[v(48,(6*(i))+ 4)]; \ + ss[5] ^= ss[4]; k[v(48,(6*(i))+11)] = ss[6] ^= k[v(48,(6*(i))+ 5)]; \ +} + +#define kdl6(k,i) \ +{ ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[v(48,(6*(i))+ 6)] = ss[0]; \ + ss[1] ^= ss[0]; k[v(48,(6*(i))+ 7)] = ss[1]; \ + ss[2] ^= ss[1]; k[v(48,(6*(i))+ 8)] = ss[2]; \ + ss[3] ^= ss[2]; k[v(48,(6*(i))+ 9)] = ss[3]; \ +} + +AES_RETURN aes_decrypt_key192(const unsigned char *key, aes_decrypt_ctx cx[1]) +{ uint_32t ss[7]; +#if defined( d_vars ) + d_vars; +#endif + cx->ks[v(48,(0))] = ss[0] = word_in(key, 
0); + cx->ks[v(48,(1))] = ss[1] = word_in(key, 1); + cx->ks[v(48,(2))] = ss[2] = word_in(key, 2); + cx->ks[v(48,(3))] = ss[3] = word_in(key, 3); + +#ifdef DEC_KS_UNROLL + ss[4] = word_in(key, 4); + cx->ks[v(48,(4))] = ff(ss[4]); + ss[5] = word_in(key, 5); + cx->ks[v(48,(5))] = ff(ss[5]); + kdf6(cx->ks, 0); kd6(cx->ks, 1); + kd6(cx->ks, 2); kd6(cx->ks, 3); + kd6(cx->ks, 4); kd6(cx->ks, 5); + kd6(cx->ks, 6); kdl6(cx->ks, 7); +#else + cx->ks[v(48,(4))] = ss[4] = word_in(key, 4); + cx->ks[v(48,(5))] = ss[5] = word_in(key, 5); + { uint_32t i; + + for(i = 0; i < 7; ++i) + k6e(cx->ks, i); + k6ef(cx->ks, 7); +#if !(DEC_ROUND == NO_TABLES) + for(i = N_COLS; i < 12 * N_COLS; ++i) + cx->ks[i] = inv_mcol(cx->ks[i]); +#endif + } +#endif + cx->inf.l = 0; + cx->inf.b[0] = 12 * 16; + +#ifdef USE_VIA_ACE_IF_PRESENT + if(VIA_ACE_AVAILABLE) + cx->inf.b[1] = 0xff; +#endif + return EXIT_SUCCESS; +} + +#endif + +#if defined(AES_256) || defined( AES_VAR ) + +#define k8ef(k,i) \ +{ k[v(56,(8*(i))+ 8)] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; \ + k[v(56,(8*(i))+ 9)] = ss[1] ^= ss[0]; \ + k[v(56,(8*(i))+10)] = ss[2] ^= ss[1]; \ + k[v(56,(8*(i))+11)] = ss[3] ^= ss[2]; \ +} + +#define k8e(k,i) \ +{ k8ef(k,i); \ + k[v(56,(8*(i))+12)] = ss[4] ^= ls_box(ss[3],0); \ + k[v(56,(8*(i))+13)] = ss[5] ^= ss[4]; \ + k[v(56,(8*(i))+14)] = ss[6] ^= ss[5]; \ + k[v(56,(8*(i))+15)] = ss[7] ^= ss[6]; \ +} + +#define kdf8(k,i) \ +{ ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ff(ss[0]); \ + ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ff(ss[1]); \ + ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ff(ss[2]); \ + ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ff(ss[3]); \ + ss[4] ^= ls_box(ss[3],0); k[v(56,(8*(i))+12)] = ff(ss[4]); \ + ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ff(ss[5]); \ + ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ff(ss[6]); \ + ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ff(ss[7]); \ +} + +#define kd8(k,i) \ +{ ss[8] = ls_box(ss[7],3) ^ t_use(r,c)[i]; \ + ss[0] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+ 8)] = ss[8] ^= k[v(56,(8*(i)))]; \ + ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[8] ^= k[v(56,(8*(i))+ 1)]; \ + ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[8] ^= k[v(56,(8*(i))+ 2)]; \ + ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[8] ^= k[v(56,(8*(i))+ 3)]; \ + ss[8] = ls_box(ss[3],0); \ + ss[4] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+12)] = ss[8] ^= k[v(56,(8*(i))+ 4)]; \ + ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ss[8] ^= k[v(56,(8*(i))+ 5)]; \ + ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ss[8] ^= k[v(56,(8*(i))+ 6)]; \ + ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ss[8] ^= k[v(56,(8*(i))+ 7)]; \ +} + +#define kdl8(k,i) \ +{ ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ss[0]; \ + ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[1]; \ + ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[2]; \ + ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[3]; \ +} + +AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]) +{ uint_32t ss[9]; +#if defined( d_vars ) + d_vars; +#endif + cx->ks[v(56,(0))] = ss[0] = word_in(key, 0); + cx->ks[v(56,(1))] = ss[1] = word_in(key, 1); + cx->ks[v(56,(2))] = ss[2] = word_in(key, 2); + cx->ks[v(56,(3))] = ss[3] = word_in(key, 3); + +#ifdef DEC_KS_UNROLL + ss[4] = word_in(key, 4); + cx->ks[v(56,(4))] = ff(ss[4]); + ss[5] = word_in(key, 5); + cx->ks[v(56,(5))] = ff(ss[5]); + ss[6] = word_in(key, 6); + cx->ks[v(56,(6))] = ff(ss[6]); + ss[7] = word_in(key, 7); + cx->ks[v(56,(7))] = ff(ss[7]); + kdf8(cx->ks, 0); kd8(cx->ks, 1); + kd8(cx->ks, 2); kd8(cx->ks, 3); + kd8(cx->ks, 4); kd8(cx->ks, 5); + kdl8(cx->ks, 6); 
+#else + cx->ks[v(56,(4))] = ss[4] = word_in(key, 4); + cx->ks[v(56,(5))] = ss[5] = word_in(key, 5); + cx->ks[v(56,(6))] = ss[6] = word_in(key, 6); + cx->ks[v(56,(7))] = ss[7] = word_in(key, 7); + { uint_32t i; + + for(i = 0; i < 6; ++i) + k8e(cx->ks, i); + k8ef(cx->ks, 6); +#if !(DEC_ROUND == NO_TABLES) + for(i = N_COLS; i < 14 * N_COLS; ++i) + cx->ks[i] = inv_mcol(cx->ks[i]); +#endif + } +#endif + cx->inf.l = 0; + cx->inf.b[0] = 14 * 16; + +#ifdef USE_VIA_ACE_IF_PRESENT + if(VIA_ACE_AVAILABLE) + cx->inf.b[1] = 0xff; +#endif + return EXIT_SUCCESS; +} + +#endif + +#if defined( AES_VAR ) + +AES_RETURN aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1]) +{ + switch(key_len) + { + case 16: case 128: return aes_decrypt_key128(key, cx); + case 24: case 192: return aes_decrypt_key192(key, cx); + case 32: case 256: return aes_decrypt_key256(key, cx); + default: return EXIT_FAILURE; + } +} + +#endif + +#endif + +#if defined(__cplusplus) +} +#endif diff --git a/libs/libks/crypt/aesopt.h b/libs/libks/crypt/aesopt.h new file mode 100644 index 0000000000..471e0c9d38 --- /dev/null +++ b/libs/libks/crypt/aesopt.h @@ -0,0 +1,742 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 + + This file contains the compilation options for AES (Rijndael) and code + that is common across encryption, key scheduling and table generation. + + OPERATION + + These source code files implement the AES algorithm Rijndael designed by + Joan Daemen and Vincent Rijmen. This version is designed for the standard + block size of 16 bytes and for key sizes of 128, 192 and 256 bits (16, 24 + and 32 bytes). + + This version is designed for flexibility and speed using operations on + 32-bit words rather than operations on bytes. It can be compiled with + either big or little endian internal byte order but is faster when the + native byte order for the processor is used. + + THE CIPHER INTERFACE + + The cipher interface is implemented as an array of bytes in which lower + AES bit sequence indexes map to higher numeric significance within bytes. 
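+
+  A typical calling sequence on the decryption side, shown only as an
+  illustrative sketch built from the subroutines listed below, is
+
+      aes_decrypt_ctx ctx[1];
+      aes_decrypt_key256(key, ctx);          (or _key128 / _key192)
+      aes_decrypt(in_blk, out_blk, ctx);
+
+  remembering that aes_init() must be called first when dynamic tables
+  are in use.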
+ + uint_8t (an unsigned 8-bit type) + uint_32t (an unsigned 32-bit type) + struct aes_encrypt_ctx (structure for the cipher encryption context) + struct aes_decrypt_ctx (structure for the cipher decryption context) + AES_RETURN the function return type + + C subroutine calls: + + AES_RETURN aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]); + AES_RETURN aes_encrypt_key192(const unsigned char *key, aes_encrypt_ctx cx[1]); + AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]); + AES_RETURN aes_encrypt(const unsigned char *in, unsigned char *out, + const aes_encrypt_ctx cx[1]); + + AES_RETURN aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]); + AES_RETURN aes_decrypt_key192(const unsigned char *key, aes_decrypt_ctx cx[1]); + AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]); + AES_RETURN aes_decrypt(const unsigned char *in, unsigned char *out, + const aes_decrypt_ctx cx[1]); + + IMPORTANT NOTE: If you are using this C interface with dynamic tables make sure that + you call aes_init() before AES is used so that the tables are initialised. + + C++ aes class subroutines: + + Class AESencrypt for encryption + + Construtors: + AESencrypt(void) + AESencrypt(const unsigned char *key) - 128 bit key + Members: + AES_RETURN key128(const unsigned char *key) + AES_RETURN key192(const unsigned char *key) + AES_RETURN key256(const unsigned char *key) + AES_RETURN encrypt(const unsigned char *in, unsigned char *out) const + + Class AESdecrypt for encryption + Construtors: + AESdecrypt(void) + AESdecrypt(const unsigned char *key) - 128 bit key + Members: + AES_RETURN key128(const unsigned char *key) + AES_RETURN key192(const unsigned char *key) + AES_RETURN key256(const unsigned char *key) + AES_RETURN decrypt(const unsigned char *in, unsigned char *out) const +*/ + +#if !defined( _AESOPT_H ) +#define _AESOPT_H + +#if defined( __cplusplus ) +#include "aescpp.h" +#else +#include "aes.h" +#endif + +/* PLATFORM SPECIFIC INCLUDES */ + +#include "brg_endian.h" + +/* CONFIGURATION - THE USE OF DEFINES + + Later in this section there are a number of defines that control the + operation of the code. In each section, the purpose of each define is + explained so that the relevant form can be included or excluded by + setting either 1's or 0's respectively on the branches of the related + #if clauses. The following local defines should not be changed. +*/ + +#define ENCRYPTION_IN_C 1 +#define DECRYPTION_IN_C 2 +#define ENC_KEYING_IN_C 4 +#define DEC_KEYING_IN_C 8 + +#define NO_TABLES 0 +#define ONE_TABLE 1 +#define FOUR_TABLES 4 +#define NONE 0 +#define PARTIAL 1 +#define FULL 2 + +/* --- START OF USER CONFIGURED OPTIONS --- */ + +/* 1. BYTE ORDER WITHIN 32 BIT WORDS + + The fundamental data processing units in Rijndael are 8-bit bytes. The + input, output and key input are all enumerated arrays of bytes in which + bytes are numbered starting at zero and increasing to one less than the + number of bytes in the array in question. This enumeration is only used + for naming bytes and does not imply any adjacency or order relationship + from one byte to another. When these inputs and outputs are considered + as bit sequences, bits 8*n to 8*n+7 of the bit sequence are mapped to + byte[n] with bit 8n+i in the sequence mapped to bit 7-i within the byte. + In this implementation bits are numbered from 0 to 7 starting at the + numerically least significant end of each byte (bit n represents 2^n). 
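+   (For example, under this numbering the first bit of the AES bit
+   sequence, sequence bit 0, is bit 7 of byte[0], the bit with numeric
+   value 0x80.)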
+ + However, Rijndael can be implemented more efficiently using 32-bit + words by packing bytes into words so that bytes 4*n to 4*n+3 are placed + into word[n]. While in principle these bytes can be assembled into words + in any positions, this implementation only supports the two formats in + which bytes in adjacent positions within words also have adjacent byte + numbers. This order is called big-endian if the lowest numbered bytes + in words have the highest numeric significance and little-endian if the + opposite applies. + + This code can work in either order irrespective of the order used by the + machine on which it runs. Normally the internal byte order will be set + to the order of the processor on which the code is to be run but this + define can be used to reverse this in special situations + + WARNING: Assembler code versions rely on PLATFORM_BYTE_ORDER being set. + This define will hence be redefined later (in section 4) if necessary +*/ + +#if 1 +# define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER +#elif 0 +# define ALGORITHM_BYTE_ORDER IS_LITTLE_ENDIAN +#elif 0 +# define ALGORITHM_BYTE_ORDER IS_BIG_ENDIAN +#else +# error The algorithm byte order is not defined +#endif + +/* 2. VIA ACE SUPPORT */ + +#if defined( __GNUC__ ) && defined( __i386__ ) \ + || defined( _WIN32 ) && defined( _M_IX86 ) \ + && !(defined( _WIN64 ) || defined( _WIN32_WCE ) || defined( _MSC_VER ) && ( _MSC_VER <= 800 )) +# define VIA_ACE_POSSIBLE +#endif + +/* Define this option if support for the VIA ACE is required. This uses + inline assembler instructions and is only implemented for the Microsoft, + Intel and GCC compilers. If VIA ACE is known to be present, then defining + ASSUME_VIA_ACE_PRESENT will remove the ordinary encryption/decryption + code. If USE_VIA_ACE_IF_PRESENT is defined then VIA ACE will be used if + it is detected (both present and enabled) but the normal AES code will + also be present. + + When VIA ACE is to be used, all AES encryption contexts MUST be 16 byte + aligned; other input/output buffers do not need to be 16 byte aligned + but there are very large performance gains if this can be arranged. + VIA ACE also requires the decryption key schedule to be in reverse + order (which later checks below ensure). +*/ + +#if 1 && defined( VIA_ACE_POSSIBLE ) && !defined( USE_VIA_ACE_IF_PRESENT ) +# define USE_VIA_ACE_IF_PRESENT +#endif + +#if 0 && defined( VIA_ACE_POSSIBLE ) && !defined( ASSUME_VIA_ACE_PRESENT ) +# define ASSUME_VIA_ACE_PRESENT +# endif + +/* 3. ASSEMBLER SUPPORT + + This define (which can be on the command line) enables the use of the + assembler code routines for encryption, decryption and key scheduling + as follows: + + ASM_X86_V1C uses the assembler (aes_x86_v1.asm) with large tables for + encryption and decryption and but with key scheduling in C + ASM_X86_V2 uses assembler (aes_x86_v2.asm) with compressed tables for + encryption, decryption and key scheduling + ASM_X86_V2C uses assembler (aes_x86_v2.asm) with compressed tables for + encryption and decryption and but with key scheduling in C + ASM_AMD64_C uses assembler (aes_amd64.asm) with compressed tables for + encryption and decryption and but with key scheduling in C + + Change one 'if 0' below to 'if 1' to select the version or define + as a compilation option. 
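+    (For example, defining ASM_AMD64_C on the compiler command line,
+    e.g. with -DASM_AMD64_C, and assembling aes_amd64.asm selects the
+    AMD64 version without editing the '#if 0' lines below.)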
+*/ + +#if 0 && !defined( ASM_X86_V1C ) +# define ASM_X86_V1C +#elif 0 && !defined( ASM_X86_V2 ) +# define ASM_X86_V2 +#elif 0 && !defined( ASM_X86_V2C ) +# define ASM_X86_V2C +#elif 0 && !defined( ASM_AMD64_C ) +# define ASM_AMD64_C +#endif + +#if (defined ( ASM_X86_V1C ) || defined( ASM_X86_V2 ) || defined( ASM_X86_V2C )) \ + && !defined( _M_IX86 ) || defined( ASM_AMD64_C ) && !defined( _M_X64 ) +# error Assembler code is only available for x86 and AMD64 systems +#endif + +/* 4. FAST INPUT/OUTPUT OPERATIONS. + + On some machines it is possible to improve speed by transferring the + bytes in the input and output arrays to and from the internal 32-bit + variables by addressing these arrays as if they are arrays of 32-bit + words. On some machines this will always be possible but there may + be a large performance penalty if the byte arrays are not aligned on + the normal word boundaries. On other machines this technique will + lead to memory access errors when such 32-bit word accesses are not + properly aligned. The option SAFE_IO avoids such problems but will + often be slower on those machines that support misaligned access + (especially so if care is taken to align the input and output byte + arrays on 32-bit word boundaries). If SAFE_IO is not defined it is + assumed that access to byte arrays as if they are arrays of 32-bit + words will not cause problems when such accesses are misaligned. +*/ +#if 1 && !defined( _MSC_VER ) +# define SAFE_IO +#endif + +/* 5. LOOP UNROLLING + + The code for encryption and decrytpion cycles through a number of rounds + that can be implemented either in a loop or by expanding the code into a + long sequence of instructions, the latter producing a larger program but + one that will often be much faster. The latter is called loop unrolling. + There are also potential speed advantages in expanding two iterations in + a loop with half the number of iterations, which is called partial loop + unrolling. The following options allow partial or full loop unrolling + to be set independently for encryption and decryption +*/ +#if 1 +# define ENC_UNROLL FULL +#elif 0 +# define ENC_UNROLL PARTIAL +#else +# define ENC_UNROLL NONE +#endif + +#if 1 +# define DEC_UNROLL FULL +#elif 0 +# define DEC_UNROLL PARTIAL +#else +# define DEC_UNROLL NONE +#endif + +#if 1 +# define ENC_KS_UNROLL +#endif + +#if 1 +# define DEC_KS_UNROLL +#endif + +/* 6. FAST FINITE FIELD OPERATIONS + + If this section is included, tables are used to provide faster finite + field arithmetic (this has no effect if FIXED_TABLES is defined). +*/ +#if 1 +# define FF_TABLES +#endif + +/* 7. INTERNAL STATE VARIABLE FORMAT + + The internal state of Rijndael is stored in a number of local 32-bit + word varaibles which can be defined either as an array or as individual + names variables. Include this section if you want to store these local + varaibles in arrays. Otherwise individual local variables will be used. +*/ +#if 1 +# define ARRAYS +#endif + +/* 8. FIXED OR DYNAMIC TABLES + + When this section is included the tables used by the code are compiled + statically into the binary file. Otherwise the subroutine aes_init() + must be called to compute them before the code is first used. +*/ +#if 1 && !(defined( _MSC_VER ) && ( _MSC_VER <= 800 )) +# define FIXED_TABLES +#endif + +/* 9. MASKING OR CASTING FROM LONGER VALUES TO BYTES + + In some systems it is better to mask longer values to extract bytes + rather than using a cast. This option allows this choice. 
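+   (Both definitions give the same value, e.g. to_byte(0x12345678 >> 8)
+   is 0x56 either way; the mask form simply avoids an explicit narrowing
+   cast, which some compilers warn about.)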
+*/ +#if 0 +# define to_byte(x) ((uint_8t)(x)) +#else +# define to_byte(x) ((x) & 0xff) +#endif + +/* 10. TABLE ALIGNMENT + + On some sytsems speed will be improved by aligning the AES large lookup + tables on particular boundaries. This define should be set to a power of + two giving the desired alignment. It can be left undefined if alignment + is not needed. This option is specific to the Microsft VC++ compiler - + it seems to sometimes cause trouble for the VC++ version 6 compiler. +*/ + +#if 1 && defined( _MSC_VER ) && ( _MSC_VER >= 1300 ) +# define TABLE_ALIGN 32 +#endif + +/* 11. REDUCE CODE AND TABLE SIZE + + This replaces some expanded macros with function calls if AES_ASM_V2 or + AES_ASM_V2C are defined +*/ + +#if 1 && (defined( ASM_X86_V2 ) || defined( ASM_X86_V2C )) +# define REDUCE_CODE_SIZE +#endif + +/* 12. TABLE OPTIONS + + This cipher proceeds by repeating in a number of cycles known as 'rounds' + which are implemented by a round function which can optionally be speeded + up using tables. The basic tables are each 256 32-bit words, with either + one or four tables being required for each round function depending on + how much speed is required. The encryption and decryption round functions + are different and the last encryption and decrytpion round functions are + different again making four different round functions in all. + + This means that: + 1. Normal encryption and decryption rounds can each use either 0, 1 + or 4 tables and table spaces of 0, 1024 or 4096 bytes each. + 2. The last encryption and decryption rounds can also use either 0, 1 + or 4 tables and table spaces of 0, 1024 or 4096 bytes each. + + Include or exclude the appropriate definitions below to set the number + of tables used by this implementation. +*/ + +#if 1 /* set tables for the normal encryption round */ +# define ENC_ROUND FOUR_TABLES +#elif 0 +# define ENC_ROUND ONE_TABLE +#else +# define ENC_ROUND NO_TABLES +#endif + +#if 1 /* set tables for the last encryption round */ +# define LAST_ENC_ROUND FOUR_TABLES +#elif 0 +# define LAST_ENC_ROUND ONE_TABLE +#else +# define LAST_ENC_ROUND NO_TABLES +#endif + +#if 1 /* set tables for the normal decryption round */ +# define DEC_ROUND FOUR_TABLES +#elif 0 +# define DEC_ROUND ONE_TABLE +#else +# define DEC_ROUND NO_TABLES +#endif + +#if 1 /* set tables for the last decryption round */ +# define LAST_DEC_ROUND FOUR_TABLES +#elif 0 +# define LAST_DEC_ROUND ONE_TABLE +#else +# define LAST_DEC_ROUND NO_TABLES +#endif + +/* The decryption key schedule can be speeded up with tables in the same + way that the round functions can. Include or exclude the following + defines to set this requirement. 
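+   (With the FOUR_TABLES setting chosen below, inv_mcol() as used by
+   the decryption key schedule is computed from the four t_im tables
+   declared in aestab.h rather than from shifts and xors.)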
+*/ +#if 1 +# define KEY_SCHED FOUR_TABLES +#elif 0 +# define KEY_SCHED ONE_TABLE +#else +# define KEY_SCHED NO_TABLES +#endif + +/* ---- END OF USER CONFIGURED OPTIONS ---- */ + +/* VIA ACE support is only available for VC++ and GCC */ + +#if !defined( _MSC_VER ) && !defined( __GNUC__ ) +# if defined( ASSUME_VIA_ACE_PRESENT ) +# undef ASSUME_VIA_ACE_PRESENT +# endif +# if defined( USE_VIA_ACE_IF_PRESENT ) +# undef USE_VIA_ACE_IF_PRESENT +# endif +#endif + +#if defined( ASSUME_VIA_ACE_PRESENT ) && !defined( USE_VIA_ACE_IF_PRESENT ) +# define USE_VIA_ACE_IF_PRESENT +#endif + +#if defined( USE_VIA_ACE_IF_PRESENT ) && !defined ( AES_REV_DKS ) +# define AES_REV_DKS +#endif + +/* ********** UNDEF - we don't use VIA stuff ****************** */ +#undef USE_VIA_ACE_IF_PRESENT + +/* Assembler support requires the use of platform byte order */ + +#if ( defined( ASM_X86_V1C ) || defined( ASM_X86_V2C ) || defined( ASM_AMD64_C ) ) \ + && (ALGORITHM_BYTE_ORDER != PLATFORM_BYTE_ORDER) +# undef ALGORITHM_BYTE_ORDER +# define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER +#endif + +/* In this implementation the columns of the state array are each held in + 32-bit words. The state array can be held in various ways: in an array + of words, in a number of individual word variables or in a number of + processor registers. The following define maps a variable name x and + a column number c to the way the state array variable is to be held. + The first define below maps the state into an array x[c] whereas the + second form maps the state into a number of individual variables x0, + x1, etc. Another form could map individual state colums to machine + register names. +*/ + +#if defined( ARRAYS ) +# define s(x,c) x[c] +#else +# define s(x,c) x##c +#endif + +/* This implementation provides subroutines for encryption, decryption + and for setting the three key lengths (separately) for encryption + and decryption. 
Since not all functions are needed, masks are set + up here to determine which will be implemented in C +*/ + +#if !defined( AES_ENCRYPT ) +# define EFUNCS_IN_C 0 +#elif defined( ASSUME_VIA_ACE_PRESENT ) || defined( ASM_X86_V1C ) \ + || defined( ASM_X86_V2C ) || defined( ASM_AMD64_C ) +# define EFUNCS_IN_C ENC_KEYING_IN_C +#elif !defined( ASM_X86_V2 ) +# define EFUNCS_IN_C ( ENCRYPTION_IN_C | ENC_KEYING_IN_C ) +#else +# define EFUNCS_IN_C 0 +#endif + +#if !defined( AES_DECRYPT ) +# define DFUNCS_IN_C 0 +#elif defined( ASSUME_VIA_ACE_PRESENT ) || defined( ASM_X86_V1C ) \ + || defined( ASM_X86_V2C ) || defined( ASM_AMD64_C ) +# define DFUNCS_IN_C DEC_KEYING_IN_C +#elif !defined( ASM_X86_V2 ) +# define DFUNCS_IN_C ( DECRYPTION_IN_C | DEC_KEYING_IN_C ) +#else +# define DFUNCS_IN_C 0 +#endif + +#define FUNCS_IN_C ( EFUNCS_IN_C | DFUNCS_IN_C ) + +/* END OF CONFIGURATION OPTIONS */ + +#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2)) + +/* Disable or report errors on some combinations of options */ + +#if ENC_ROUND == NO_TABLES && LAST_ENC_ROUND != NO_TABLES +# undef LAST_ENC_ROUND +# define LAST_ENC_ROUND NO_TABLES +#elif ENC_ROUND == ONE_TABLE && LAST_ENC_ROUND == FOUR_TABLES +# undef LAST_ENC_ROUND +# define LAST_ENC_ROUND ONE_TABLE +#endif + +#if ENC_ROUND == NO_TABLES && ENC_UNROLL != NONE +# undef ENC_UNROLL +# define ENC_UNROLL NONE +#endif + +#if DEC_ROUND == NO_TABLES && LAST_DEC_ROUND != NO_TABLES +# undef LAST_DEC_ROUND +# define LAST_DEC_ROUND NO_TABLES +#elif DEC_ROUND == ONE_TABLE && LAST_DEC_ROUND == FOUR_TABLES +# undef LAST_DEC_ROUND +# define LAST_DEC_ROUND ONE_TABLE +#endif + +#if DEC_ROUND == NO_TABLES && DEC_UNROLL != NONE +# undef DEC_UNROLL +# define DEC_UNROLL NONE +#endif + +#if defined( bswap32 ) +# define aes_sw32 bswap32 +#elif defined( bswap_32 ) +# define aes_sw32 bswap_32 +#else +# define brot(x,n) (((uint_32t)(x) << n) | ((uint_32t)(x) >> (32 - n))) +# define aes_sw32(x) ((brot((x),8) & 0x00ff00ff) | (brot((x),24) & 0xff00ff00)) +#endif + +/* upr(x,n): rotates bytes within words by n positions, moving bytes to + higher index positions with wrap around into low positions + ups(x,n): moves bytes by n positions to higher index positions in + words but without wrap around + bval(x,n): extracts a byte from a word + + WARNING: The definitions given here are intended only for use with + unsigned variables and with shift counts that are compile + time constants +*/ + +#if ( ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN ) +# define upr(x,n) (((uint_32t)(x) << (8 * (n))) | ((uint_32t)(x) >> (32 - 8 * (n)))) +# define ups(x,n) ((uint_32t) (x) << (8 * (n))) +# define bval(x,n) to_byte((x) >> (8 * (n))) +# define bytes2word(b0, b1, b2, b3) \ + (((uint_32t)(b3) << 24) | ((uint_32t)(b2) << 16) | ((uint_32t)(b1) << 8) | (b0)) +#endif + +#if ( ALGORITHM_BYTE_ORDER == IS_BIG_ENDIAN ) +# define upr(x,n) (((uint_32t)(x) >> (8 * (n))) | ((uint_32t)(x) << (32 - 8 * (n)))) +# define ups(x,n) ((uint_32t) (x) >> (8 * (n))) +# define bval(x,n) to_byte((x) >> (24 - 8 * (n))) +# define bytes2word(b0, b1, b2, b3) \ + (((uint_32t)(b0) << 24) | ((uint_32t)(b1) << 16) | ((uint_32t)(b2) << 8) | (b3)) +#endif + +#if defined( SAFE_IO ) +# define word_in(x,c) bytes2word(((const uint_8t*)(x)+4*c)[0], ((const uint_8t*)(x)+4*c)[1], \ + ((const uint_8t*)(x)+4*c)[2], ((const uint_8t*)(x)+4*c)[3]) +# define word_out(x,c,v) { ((uint_8t*)(x)+4*c)[0] = bval(v,0); ((uint_8t*)(x)+4*c)[1] = bval(v,1); \ + ((uint_8t*)(x)+4*c)[2] = bval(v,2); ((uint_8t*)(x)+4*c)[3] = bval(v,3); } +#elif ( ALGORITHM_BYTE_ORDER == 
PLATFORM_BYTE_ORDER ) +# define word_in(x,c) (*((uint_32t*)(x)+(c))) +# define word_out(x,c,v) (*((uint_32t*)(x)+(c)) = (v)) +#else +# define word_in(x,c) aes_sw32(*((uint_32t*)(x)+(c))) +# define word_out(x,c,v) (*((uint_32t*)(x)+(c)) = aes_sw32(v)) +#endif + +/* the finite field modular polynomial and elements */ + +#define WPOLY 0x011b +#define BPOLY 0x1b + +/* multiply four bytes in GF(2^8) by 'x' {02} in parallel */ + +#define gf_c1 0x80808080 +#define gf_c2 0x7f7f7f7f +#define gf_mulx(x) ((((x) & gf_c2) << 1) ^ ((((x) & gf_c1) >> 7) * BPOLY)) + +/* The following defines provide alternative definitions of gf_mulx that might + give improved performance if a fast 32-bit multiply is not available. Note + that a temporary variable u needs to be defined where gf_mulx is used. + +#define gf_mulx(x) (u = (x) & gf_c1, u |= (u >> 1), ((x) & gf_c2) << 1) ^ ((u >> 3) | (u >> 6)) +#define gf_c4 (0x01010101 * BPOLY) +#define gf_mulx(x) (u = (x) & gf_c1, ((x) & gf_c2) << 1) ^ ((u - (u >> 7)) & gf_c4) +*/ + +/* Work out which tables are needed for the different options */ + +#if defined( ASM_X86_V1C ) +# if defined( ENC_ROUND ) +# undef ENC_ROUND +# endif +# define ENC_ROUND FOUR_TABLES +# if defined( LAST_ENC_ROUND ) +# undef LAST_ENC_ROUND +# endif +# define LAST_ENC_ROUND FOUR_TABLES +# if defined( DEC_ROUND ) +# undef DEC_ROUND +# endif +# define DEC_ROUND FOUR_TABLES +# if defined( LAST_DEC_ROUND ) +# undef LAST_DEC_ROUND +# endif +# define LAST_DEC_ROUND FOUR_TABLES +# if defined( KEY_SCHED ) +# undef KEY_SCHED +# define KEY_SCHED FOUR_TABLES +# endif +#endif + +#if ( FUNCS_IN_C & ENCRYPTION_IN_C ) || defined( ASM_X86_V1C ) +# if ENC_ROUND == ONE_TABLE +# define FT1_SET +# elif ENC_ROUND == FOUR_TABLES +# define FT4_SET +# else +# define SBX_SET +# endif +# if LAST_ENC_ROUND == ONE_TABLE +# define FL1_SET +# elif LAST_ENC_ROUND == FOUR_TABLES +# define FL4_SET +# elif !defined( SBX_SET ) +# define SBX_SET +# endif +#endif + +#if ( FUNCS_IN_C & DECRYPTION_IN_C ) || defined( ASM_X86_V1C ) +# if DEC_ROUND == ONE_TABLE +# define IT1_SET +# elif DEC_ROUND == FOUR_TABLES +# define IT4_SET +# else +# define ISB_SET +# endif +# if LAST_DEC_ROUND == ONE_TABLE +# define IL1_SET +# elif LAST_DEC_ROUND == FOUR_TABLES +# define IL4_SET +# elif !defined(ISB_SET) +# define ISB_SET +# endif +#endif + +#if !(defined( REDUCE_CODE_SIZE ) && (defined( ASM_X86_V2 ) || defined( ASM_X86_V2C ))) +# if ((FUNCS_IN_C & ENC_KEYING_IN_C) || (FUNCS_IN_C & DEC_KEYING_IN_C)) +# if KEY_SCHED == ONE_TABLE +# if !defined( FL1_SET ) && !defined( FL4_SET ) +# define LS1_SET +# endif +# elif KEY_SCHED == FOUR_TABLES +# if !defined( FL4_SET ) +# define LS4_SET +# endif +# elif !defined( SBX_SET ) +# define SBX_SET +# endif +# endif +# if (FUNCS_IN_C & DEC_KEYING_IN_C) +# if KEY_SCHED == ONE_TABLE +# define IM1_SET +# elif KEY_SCHED == FOUR_TABLES +# define IM4_SET +# elif !defined( SBX_SET ) +# define SBX_SET +# endif +# endif +#endif + +/* generic definitions of Rijndael macros that use tables */ + +#define no_table(x,box,vf,rf,c) bytes2word( \ + box[bval(vf(x,0,c),rf(0,c))], \ + box[bval(vf(x,1,c),rf(1,c))], \ + box[bval(vf(x,2,c),rf(2,c))], \ + box[bval(vf(x,3,c),rf(3,c))]) + +#define one_table(x,op,tab,vf,rf,c) \ + ( tab[bval(vf(x,0,c),rf(0,c))] \ + ^ op(tab[bval(vf(x,1,c),rf(1,c))],1) \ + ^ op(tab[bval(vf(x,2,c),rf(2,c))],2) \ + ^ op(tab[bval(vf(x,3,c),rf(3,c))],3)) + +#define four_tables(x,tab,vf,rf,c) \ + ( tab[0][bval(vf(x,0,c),rf(0,c))] \ + ^ tab[1][bval(vf(x,1,c),rf(1,c))] \ + ^ tab[2][bval(vf(x,2,c),rf(2,c))] \ + ^ 
tab[3][bval(vf(x,3,c),rf(3,c))]) + +#define vf1(x,r,c) (x) +#define rf1(r,c) (r) +#define rf2(r,c) ((8+r-c)&3) + +/* perform forward and inverse column mix operation on four bytes in long word x in */ +/* parallel. NOTE: x must be a simple variable, NOT an expression in these macros. */ + +#if !(defined( REDUCE_CODE_SIZE ) && (defined( ASM_X86_V2 ) || defined( ASM_X86_V2C ))) + +#if defined( FM4_SET ) /* not currently used */ +# define fwd_mcol(x) four_tables(x,t_use(f,m),vf1,rf1,0) +#elif defined( FM1_SET ) /* not currently used */ +# define fwd_mcol(x) one_table(x,upr,t_use(f,m),vf1,rf1,0) +#else +# define dec_fmvars uint_32t g2 +# define fwd_mcol(x) (g2 = gf_mulx(x), g2 ^ upr((x) ^ g2, 3) ^ upr((x), 2) ^ upr((x), 1)) +#endif + +#if defined( IM4_SET ) +# define inv_mcol(x) four_tables(x,t_use(i,m),vf1,rf1,0) +#elif defined( IM1_SET ) +# define inv_mcol(x) one_table(x,upr,t_use(i,m),vf1,rf1,0) +#else +# define dec_imvars uint_32t g2, g4, g9 +# define inv_mcol(x) (g2 = gf_mulx(x), g4 = gf_mulx(g2), g9 = (x) ^ gf_mulx(g4), g4 ^= g9, \ + (x) ^ g2 ^ g4 ^ upr(g2 ^ g9, 3) ^ upr(g4, 2) ^ upr(g9, 1)) +#endif + +#if defined( FL4_SET ) +# define ls_box(x,c) four_tables(x,t_use(f,l),vf1,rf2,c) +#elif defined( LS4_SET ) +# define ls_box(x,c) four_tables(x,t_use(l,s),vf1,rf2,c) +#elif defined( FL1_SET ) +# define ls_box(x,c) one_table(x,upr,t_use(f,l),vf1,rf2,c) +#elif defined( LS1_SET ) +# define ls_box(x,c) one_table(x,upr,t_use(l,s),vf1,rf2,c) +#else +# define ls_box(x,c) no_table(x,t_use(s,box),vf1,rf2,c) +#endif + +#endif + +#if defined( ASM_X86_V1C ) && defined( AES_DECRYPT ) && !defined( ISB_SET ) +# define ISB_SET +#endif + +#endif diff --git a/libs/libks/crypt/aestab.c b/libs/libks/crypt/aestab.c new file mode 100644 index 0000000000..4a6e43247e --- /dev/null +++ b/libs/libks/crypt/aestab.c @@ -0,0 +1,391 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. 
+--------------------------------------------------------------------------- +Issue Date: 20/12/2007 +*/ + +#define DO_TABLES + +#include "aes.h" +#include "aesopt.h" + +#if defined(FIXED_TABLES) + +#define sb_data(w) {\ + w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\ + w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\ + w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\ + w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\ + w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\ + w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\ + w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\ + w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\ + w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\ + w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\ + w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\ + w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\ + w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\ + w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\ + w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\ + w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\ + w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\ + w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\ + w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\ + w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\ + w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\ + w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\ + w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\ + w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\ + w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\ + w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\ + w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\ + w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\ + w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\ + w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\ + w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\ + w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) } + +#define isb_data(w) {\ + w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38),\ + w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb),\ + w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87),\ + w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb),\ + w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d),\ + w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e),\ + w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2),\ + w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25),\ + w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16),\ + w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92),\ + w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda),\ + w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84),\ + 
w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a),\ + w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06),\ + w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02),\ + w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b),\ + w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea),\ + w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73),\ + w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85),\ + w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e),\ + w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89),\ + w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b),\ + w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20),\ + w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4),\ + w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31),\ + w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f),\ + w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d),\ + w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef),\ + w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0),\ + w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61),\ + w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26),\ + w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d) } + +#define mm_data(w) {\ + w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07),\ + w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f),\ + w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17),\ + w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f),\ + w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27),\ + w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f),\ + w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37),\ + w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f),\ + w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47),\ + w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f),\ + w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57),\ + w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f),\ + w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67),\ + w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f),\ + w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77),\ + w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f),\ + w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87),\ + w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f),\ + w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97),\ + w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f),\ + w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7),\ + w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf),\ + w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7),\ + w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf),\ + w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7),\ + w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf),\ + w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7),\ + 
w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), w(0xdd), w(0xde), w(0xdf),\ + w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7),\ + w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef),\ + w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7),\ + w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff) } + +#define rc_data(w) {\ + w(0x01), w(0x02), w(0x04), w(0x08), w(0x10),w(0x20), w(0x40), w(0x80),\ + w(0x1b), w(0x36) } + +#define h0(x) (x) + +#define w0(p) bytes2word(p, 0, 0, 0) +#define w1(p) bytes2word(0, p, 0, 0) +#define w2(p) bytes2word(0, 0, p, 0) +#define w3(p) bytes2word(0, 0, 0, p) + +#define u0(p) bytes2word(f2(p), p, p, f3(p)) +#define u1(p) bytes2word(f3(p), f2(p), p, p) +#define u2(p) bytes2word(p, f3(p), f2(p), p) +#define u3(p) bytes2word(p, p, f3(p), f2(p)) + +#define v0(p) bytes2word(fe(p), f9(p), fd(p), fb(p)) +#define v1(p) bytes2word(fb(p), fe(p), f9(p), fd(p)) +#define v2(p) bytes2word(fd(p), fb(p), fe(p), f9(p)) +#define v3(p) bytes2word(f9(p), fd(p), fb(p), fe(p)) + +#endif + +#if defined(FIXED_TABLES) || !defined(FF_TABLES) + +#define f2(x) ((x<<1) ^ (((x>>7) & 1) * WPOLY)) +#define f4(x) ((x<<2) ^ (((x>>6) & 1) * WPOLY) ^ (((x>>6) & 2) * WPOLY)) +#define f8(x) ((x<<3) ^ (((x>>5) & 1) * WPOLY) ^ (((x>>5) & 2) * WPOLY) \ + ^ (((x>>5) & 4) * WPOLY)) +#define f3(x) (f2(x) ^ x) +#define f9(x) (f8(x) ^ x) +#define fb(x) (f8(x) ^ f2(x) ^ x) +#define fd(x) (f8(x) ^ f4(x) ^ x) +#define fe(x) (f8(x) ^ f4(x) ^ f2(x)) + +#else + +#define f2(x) ((x) ? pow[log[x] + 0x19] : 0) +#define f3(x) ((x) ? pow[log[x] + 0x01] : 0) +#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0) +#define fb(x) ((x) ? pow[log[x] + 0x68] : 0) +#define fd(x) ((x) ? pow[log[x] + 0xee] : 0) +#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0) + +#endif + +#include "aestab.h" + +#if defined(__cplusplus) +extern "C" +{ +#endif + +#if defined(FIXED_TABLES) + +/* implemented in case of wrong call for fixed tables */ + +AES_RETURN ks_aes_init(void) +{ + return EXIT_SUCCESS; +} + +#else /* Generate the tables for the dynamic table option */ + +#if defined(FF_TABLES) + +#define gf_inv(x) ((x) ? pow[ 255 - log[x]] : 0) + +#else + +/* It will generally be sensible to use tables to compute finite + field multiplies and inverses but where memory is scarse this + code might sometimes be better. But it only has effect during + initialisation so its pretty unimportant in overall terms. +*/ + +/* return 2 ^ (n - 1) where n is the bit number of the highest bit + set in x with x in the range 1 < x < 0x00000200. 
This form is + used so that locals within fi can be bytes rather than words +*/ + +static uint_8t hibit(const uint_32t x) +{ uint_8t r = (uint_8t)((x >> 1) | (x >> 2)); + + r |= (r >> 2); + r |= (r >> 4); + return (r + 1) >> 1; +} + +/* return the inverse of the finite field element x */ + +static uint_8t gf_inv(const uint_8t x) +{ uint_8t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0; + + if(x < 2) + return x; + + for( ; ; ) + { + if(n1) + while(n2 >= n1) /* divide polynomial p2 by p1 */ + { + n2 /= n1; /* shift smaller polynomial left */ + p2 ^= (p1 * n2) & 0xff; /* and remove from larger one */ + v2 ^= v1 * n2; /* shift accumulated value and */ + n2 = hibit(p2); /* add into result */ + } + else + return v1; + + if(n2) /* repeat with values swapped */ + while(n1 >= n2) + { + n1 /= n2; + p1 ^= p2 * n1; + v1 ^= v2 * n1; + n1 = hibit(p1); + } + else + return v2; + } +} + +#endif + +/* The forward and inverse affine transformations used in the S-box */ +uint_8t fwd_affine(const uint_8t x) +{ uint_32t w = x; + w ^= (w << 1) ^ (w << 2) ^ (w << 3) ^ (w << 4); + return 0x63 ^ ((w ^ (w >> 8)) & 0xff); +} + +uint_8t inv_affine(const uint_8t x) +{ uint_32t w = x; + w = (w << 1) ^ (w << 3) ^ (w << 6); + return 0x05 ^ ((w ^ (w >> 8)) & 0xff); +} + +static int init = 0; + +AES_RETURN ks_aes_init(void) +{ uint_32t i, w; + +#if defined(FF_TABLES) + + uint_8t pow[512], log[256]; + + if(init) + return EXIT_SUCCESS; + /* log and power tables for GF(2^8) finite field with + WPOLY as modular polynomial - the simplest primitive + root is 0x03, used here to generate the tables + */ + + i = 0; w = 1; + do + { + pow[i] = (uint_8t)w; + pow[i + 255] = (uint_8t)w; + log[w] = (uint_8t)i++; + w ^= (w << 1) ^ (w & 0x80 ? WPOLY : 0); + } + while (w != 1); + +#else + if(init) + return EXIT_SUCCESS; +#endif + + for(i = 0, w = 1; i < RC_LENGTH; ++i) + { + t_set(r,c)[i] = bytes2word(w, 0, 0, 0); + w = f2(w); + } + + for(i = 0; i < 256; ++i) + { uint_8t b; + + b = fwd_affine(gf_inv((uint_8t)i)); + w = bytes2word(f2(b), b, b, f3(b)); + +#if defined( SBX_SET ) + t_set(s,box)[i] = b; +#endif + +#if defined( FT1_SET ) /* tables for a normal encryption round */ + t_set(f,n)[i] = w; +#endif +#if defined( FT4_SET ) + t_set(f,n)[0][i] = w; + t_set(f,n)[1][i] = upr(w,1); + t_set(f,n)[2][i] = upr(w,2); + t_set(f,n)[3][i] = upr(w,3); +#endif + w = bytes2word(b, 0, 0, 0); + +#if defined( FL1_SET ) /* tables for last encryption round (may also */ + t_set(f,l)[i] = w; /* be used in the key schedule) */ +#endif +#if defined( FL4_SET ) + t_set(f,l)[0][i] = w; + t_set(f,l)[1][i] = upr(w,1); + t_set(f,l)[2][i] = upr(w,2); + t_set(f,l)[3][i] = upr(w,3); +#endif + +#if defined( LS1_SET ) /* table for key schedule if t_set(f,l) above is*/ + t_set(l,s)[i] = w; /* not of the required form */ +#endif +#if defined( LS4_SET ) + t_set(l,s)[0][i] = w; + t_set(l,s)[1][i] = upr(w,1); + t_set(l,s)[2][i] = upr(w,2); + t_set(l,s)[3][i] = upr(w,3); +#endif + + b = gf_inv(inv_affine((uint_8t)i)); + w = bytes2word(fe(b), f9(b), fd(b), fb(b)); + +#if defined( IM1_SET ) /* tables for the inverse mix column operation */ + t_set(i,m)[b] = w; +#endif +#if defined( IM4_SET ) + t_set(i,m)[0][b] = w; + t_set(i,m)[1][b] = upr(w,1); + t_set(i,m)[2][b] = upr(w,2); + t_set(i,m)[3][b] = upr(w,3); +#endif + +#if defined( ISB_SET ) + t_set(i,box)[i] = b; +#endif +#if defined( IT1_SET ) /* tables for a normal decryption round */ + t_set(i,n)[i] = w; +#endif +#if defined( IT4_SET ) + t_set(i,n)[0][i] = w; + t_set(i,n)[1][i] = upr(w,1); + t_set(i,n)[2][i] = 
upr(w,2); + t_set(i,n)[3][i] = upr(w,3); +#endif + w = bytes2word(b, 0, 0, 0); +#if defined( IL1_SET ) /* tables for last decryption round */ + t_set(i,l)[i] = w; +#endif +#if defined( IL4_SET ) + t_set(i,l)[0][i] = w; + t_set(i,l)[1][i] = upr(w,1); + t_set(i,l)[2][i] = upr(w,2); + t_set(i,l)[3][i] = upr(w,3); +#endif + } + init = 1; + return EXIT_SUCCESS; +} + +#endif + +#if defined(__cplusplus) +} +#endif + diff --git a/libs/libks/crypt/aestab.h b/libs/libks/crypt/aestab.h new file mode 100644 index 0000000000..de685679dc --- /dev/null +++ b/libs/libks/crypt/aestab.h @@ -0,0 +1,173 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 + + This file contains the code for declaring the tables needed to implement + AES. The file aesopt.h is assumed to be included before this header file. + If there are no global variables, the definitions here can be used to put + the AES tables in a structure so that a pointer can then be added to the + AES context to pass them to the AES routines that need them. If this + facility is used, the calling program has to ensure that this pointer is + managed appropriately. In particular, the value of the t_dec(in,it) item + in the table structure must be set to zero in order to ensure that the + tables are initialised. In practice the three code sequences in aeskey.c + that control the calls to aes_init() and the aes_init() routine itself will + have to be changed for a specific implementation. If global variables are + available it will generally be preferable to use them with the precomputed + FIXED_TABLES option that uses static global tables. 
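+
+    As a concrete illustration of the naming scheme described next, the
+    token pasting defines further down expand t_use(f,n) to the global
+    array t_fn and t_use(i,m) to t_im, so the round macros in aesopt.h
+    pick up whichever tables these options declare purely by name.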
+ + The following defines can be used to control the way the tables + are defined, initialised and used in embedded environments that + require special features for these purposes + + the 't_dec' construction is used to declare fixed table arrays + the 't_set' construction is used to set fixed table values + the 't_use' construction is used to access fixed table values + + 256 byte tables: + + t_xxx(s,box) => forward S box + t_xxx(i,box) => inverse S box + + 256 32-bit word OR 4 x 256 32-bit word tables: + + t_xxx(f,n) => forward normal round + t_xxx(f,l) => forward last round + t_xxx(i,n) => inverse normal round + t_xxx(i,l) => inverse last round + t_xxx(l,s) => key schedule table + t_xxx(i,m) => key schedule table + + Other variables and tables: + + t_xxx(r,c) => the rcon table +*/ + +#if !defined( _AESTAB_H ) +#define _AESTAB_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#define t_dec(m,n) t_##m##n +#define t_set(m,n) t_##m##n +#define t_use(m,n) t_##m##n + +#if defined(FIXED_TABLES) +# if !defined( __GNUC__ ) && (defined( __MSDOS__ ) || defined( __WIN16__ )) +/* make tables far data to avoid using too much DGROUP space (PG) */ +# define CONST const far +# else +# define CONST const +# endif +#else +# define CONST +#endif + +#if defined(DO_TABLES) +# define EXTERN +#else +# define EXTERN extern +#endif + +#if defined(_MSC_VER) && defined(TABLE_ALIGN) +#define ALIGN __declspec(align(TABLE_ALIGN)) +#else +#define ALIGN +#endif + +#if defined( __WATCOMC__ ) && ( __WATCOMC__ >= 1100 ) +# define XP_DIR __cdecl +#else +# define XP_DIR +#endif + +#if defined(DO_TABLES) && defined(FIXED_TABLES) +#define d_1(t,n,b,e) EXTERN ALIGN CONST XP_DIR t n[256] = b(e) +#define d_4(t,n,b,e,f,g,h) EXTERN ALIGN CONST XP_DIR t n[4][256] = { b(e), b(f), b(g), b(h) } +EXTERN ALIGN CONST uint_32t t_dec(r,c)[RC_LENGTH] = rc_data(w0); +#else +#define d_1(t,n,b,e) EXTERN ALIGN CONST XP_DIR t n[256] +#define d_4(t,n,b,e,f,g,h) EXTERN ALIGN CONST XP_DIR t n[4][256] +EXTERN ALIGN CONST uint_32t t_dec(r,c)[RC_LENGTH]; +#endif + +#if defined( SBX_SET ) + d_1(uint_8t, t_dec(s,box), sb_data, h0); +#endif +#if defined( ISB_SET ) + d_1(uint_8t, t_dec(i,box), isb_data, h0); +#endif + +#if defined( FT1_SET ) + d_1(uint_32t, t_dec(f,n), sb_data, u0); +#endif +#if defined( FT4_SET ) + d_4(uint_32t, t_dec(f,n), sb_data, u0, u1, u2, u3); +#endif + +#if defined( FL1_SET ) + d_1(uint_32t, t_dec(f,l), sb_data, w0); +#endif +#if defined( FL4_SET ) + d_4(uint_32t, t_dec(f,l), sb_data, w0, w1, w2, w3); +#endif + +#if defined( IT1_SET ) + d_1(uint_32t, t_dec(i,n), isb_data, v0); +#endif +#if defined( IT4_SET ) + d_4(uint_32t, t_dec(i,n), isb_data, v0, v1, v2, v3); +#endif + +#if defined( IL1_SET ) + d_1(uint_32t, t_dec(i,l), isb_data, w0); +#endif +#if defined( IL4_SET ) + d_4(uint_32t, t_dec(i,l), isb_data, w0, w1, w2, w3); +#endif + +#if defined( LS1_SET ) +#if defined( FL1_SET ) +#undef LS1_SET +#else + d_1(uint_32t, t_dec(l,s), sb_data, w0); +#endif +#endif + +#if defined( LS4_SET ) +#if defined( FL4_SET ) +#undef LS4_SET +#else + d_4(uint_32t, t_dec(l,s), sb_data, w0, w1, w2, w3); +#endif +#endif + +#if defined( IM1_SET ) + d_1(uint_32t, t_dec(i,m), mm_data, v0); +#endif +#if defined( IM4_SET ) + d_4(uint_32t, t_dec(i,m), mm_data, v0, v1, v2, v3); +#endif + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/libs/libks/crypt/brg_endian.h b/libs/libks/crypt/brg_endian.h new file mode 100644 index 0000000000..0f12fbbf29 --- /dev/null +++ b/libs/libks/crypt/brg_endian.h @@ -0,0 +1,126 @@ +/* 
+--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 +*/ + +#ifndef _BRG_ENDIAN_H +#define _BRG_ENDIAN_H + +#define IS_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */ +#define IS_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */ + +/* Include files where endian defines and byteswap functions may reside */ +#if defined( __sun ) +# include +#elif defined( __FreeBSD__ ) || defined( __OpenBSD__ ) || defined( __NetBSD__ ) +# include +#elif defined( BSD ) && ( BSD >= 199103 ) || defined( __APPLE__ ) || \ + defined( __CYGWIN32__ ) || defined( __DJGPP__ ) || defined( __osf__ ) +# include +#elif defined( __linux__ ) || defined( __GNUC__ ) || defined( __GNU_LIBRARY__ ) +# if !defined( __MINGW32__ ) && !defined( _AIX ) +# include +# if !defined( __BEOS__ ) +# include +# endif +# endif +#endif + +/* Now attempt to set the define for platform byte order using any */ +/* of the four forms SYMBOL, _SYMBOL, __SYMBOL & __SYMBOL__, which */ +/* seem to encompass most endian symbol definitions */ + +#if defined( BIG_ENDIAN ) && defined( LITTLE_ENDIAN ) +# if defined( BYTE_ORDER ) && BYTE_ORDER == BIG_ENDIAN +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +# elif defined( BYTE_ORDER ) && BYTE_ORDER == LITTLE_ENDIAN +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +# endif +#elif defined( BIG_ENDIAN ) +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +#elif defined( LITTLE_ENDIAN ) +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +#endif + +#if defined( _BIG_ENDIAN ) && defined( _LITTLE_ENDIAN ) +# if defined( _BYTE_ORDER ) && _BYTE_ORDER == _BIG_ENDIAN +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +# elif defined( _BYTE_ORDER ) && _BYTE_ORDER == _LITTLE_ENDIAN +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +# endif +#elif defined( _BIG_ENDIAN ) +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +#elif defined( _LITTLE_ENDIAN ) +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +#endif + +#if defined( __BIG_ENDIAN ) && defined( __LITTLE_ENDIAN ) +# if defined( __BYTE_ORDER ) && __BYTE_ORDER == __BIG_ENDIAN +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +# elif defined( __BYTE_ORDER ) && __BYTE_ORDER == __LITTLE_ENDIAN +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +# endif +#elif defined( __BIG_ENDIAN ) +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +#elif defined( __LITTLE_ENDIAN ) +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +#endif + +#if defined( __BIG_ENDIAN__ ) && defined( __LITTLE_ENDIAN__ ) +# if defined( __BYTE_ORDER__ ) && __BYTE_ORDER__ == __BIG_ENDIAN__ +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +# elif defined( __BYTE_ORDER__ ) && __BYTE_ORDER__ == __LITTLE_ENDIAN__ +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +# endif +#elif defined( __BIG_ENDIAN__ ) +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +#elif defined( __LITTLE_ENDIAN__ ) +# define 
PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +#endif + +/* if the platform byte order could not be determined, then try to */ +/* set this define using common machine defines */ +#if !defined(PLATFORM_BYTE_ORDER) + +#if defined( __alpha__ ) || defined( __alpha ) || defined( i386 ) || \ + defined( __i386__ ) || defined( _M_I86 ) || defined( _M_IX86 ) || \ + defined( __OS2__ ) || defined( sun386 ) || defined( __TURBOC__ ) || \ + defined( vax ) || defined( vms ) || defined( VMS ) || \ + defined( __VMS ) || defined( _M_X64 ) +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN + +#elif defined( AMIGA ) || defined( applec ) || defined( __AS400__ ) || \ + defined( _CRAY ) || defined( __hppa ) || defined( __hp9000 ) || \ + defined( ibm370 ) || defined( mc68000 ) || defined( m68k ) || \ + defined( __MRC__ ) || defined( __MVS__ ) || defined( __MWERKS__ ) || \ + defined( sparc ) || defined( __sparc) || defined( SYMANTEC_C ) || \ + defined( __VOS__ ) || defined( __TIGCC__ ) || defined( __TANDEM ) || \ + defined( THINK_C ) || defined( __VMCMS__ ) || defined( _AIX ) +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN + +#elif 0 /* **** EDIT HERE IF NECESSARY **** */ +# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN +#elif 0 /* **** EDIT HERE IF NECESSARY **** */ +# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN +#else +# error Please edit lines 126 or 128 in brg_endian.h to set the platform byte order +#endif + +#endif + +#endif diff --git a/libs/libks/crypt/brg_types.h b/libs/libks/crypt/brg_types.h new file mode 100644 index 0000000000..a1d7483a4c --- /dev/null +++ b/libs/libks/crypt/brg_types.h @@ -0,0 +1,219 @@ +/* +--------------------------------------------------------------------------- +Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved. + +The redistribution and use of this software (with or without changes) +is allowed without the payment of fees or royalties provided that: + + source code distributions include the above copyright notice, this + list of conditions and the following disclaimer; + + binary distributions include the above copyright notice, this list + of conditions and the following disclaimer in their documentation. + +This software is provided 'as is' with no explicit or implied warranties +in respect of its operation, including, but not limited to, correctness +and fitness for purpose. +--------------------------------------------------------------------------- +Issue Date: 20/12/2007 + + The unsigned integer types defined here are of the form uint_t where + is the length of the type; for example, the unsigned 32-bit type is + 'uint_32t'. These are NOT the same as the 'C99 integer types' that are + defined in the inttypes.h and stdint.h headers since attempts to use these + types have shown that support for them is still highly variable. However, + since the latter are of the form uint_t, a regular expression search + and replace (in VC++ search on 'uint_{:z}t' and replace with 'uint\1_t') + can be used to convert the types used here to the C99 standard types. 
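+
+  For example, uint_32t here plays the role of the C99 uint32_t, so a
+  project that can rely on <stdint.h> could simply write
+
+      typedef uint32_t uint_32t;
+
+  (a sketch only; this header instead derives its types from the
+  UCHAR_MAX, USHRT_MAX and UINT_MAX style checks below).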
+*/ + +#ifndef _BRG_TYPES_H +#define _BRG_TYPES_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include + +#if defined( _MSC_VER ) && ( _MSC_VER >= 1300 ) +# include +# define ptrint_t intptr_t +#elif defined( __ECOS__ ) +# define intptr_t unsigned int +# define ptrint_t intptr_t +#elif defined( __GNUC__ ) && ( __GNUC__ >= 3 ) +# include +# define ptrint_t intptr_t +#else +# define ptrint_t int +#endif + +#ifndef BRG_UI8 +# define BRG_UI8 +# if UCHAR_MAX == 255u + typedef unsigned char uint_8t; +# else +# error Please define uint_8t as an 8-bit unsigned integer type in brg_types.h +# endif +#endif + +#ifndef BRG_UI16 +# define BRG_UI16 +# if USHRT_MAX == 65535u + typedef unsigned short uint_16t; +# else +# error Please define uint_16t as a 16-bit unsigned short type in brg_types.h +# endif +#endif + +#ifndef BRG_UI32 +# define BRG_UI32 +# if UINT_MAX == 4294967295u +# define li_32(h) 0x##h##u + typedef unsigned int uint_32t; +# elif ULONG_MAX == 4294967295u +# define li_32(h) 0x##h##ul + typedef unsigned long uint_32t; +# elif defined( _CRAY ) +# error This code needs 32-bit data types, which Cray machines do not provide +# else +# error Please define uint_32t as a 32-bit unsigned integer type in brg_types.h +# endif +#endif + +#ifndef BRG_UI64 +# if defined( __BORLANDC__ ) && !defined( __MSDOS__ ) +# define BRG_UI64 +# define li_64(h) 0x##h##ui64 + typedef unsigned __int64 uint_64t; +# elif defined( _MSC_VER ) && ( _MSC_VER < 1300 ) /* 1300 == VC++ 7.0 */ +# define BRG_UI64 +# define li_64(h) 0x##h##ui64 + typedef unsigned __int64 uint_64t; +# elif defined( __sun ) && defined( ULONG_MAX ) && ULONG_MAX == 0xfffffffful +# define BRG_UI64 +# define li_64(h) 0x##h##ull + typedef unsigned long long uint_64t; +# elif defined( __MVS__ ) +# define BRG_UI64 +# define li_64(h) 0x##h##ull + typedef unsigned int long long uint_64t; +# elif defined( UINT_MAX ) && UINT_MAX > 4294967295u +# if UINT_MAX == 18446744073709551615u +# define BRG_UI64 +# define li_64(h) 0x##h##u + typedef unsigned int uint_64t; +# endif +# elif defined( ULONG_MAX ) && ULONG_MAX > 4294967295u +# if ULONG_MAX == 18446744073709551615ul +# define BRG_UI64 +# define li_64(h) 0x##h##ul + typedef unsigned long uint_64t; +# endif +# elif defined( ULLONG_MAX ) && ULLONG_MAX > 4294967295u +# if ULLONG_MAX == 18446744073709551615ull +# define BRG_UI64 +# define li_64(h) 0x##h##ull + typedef unsigned long long uint_64t; +# endif +# elif defined( ULONG_LONG_MAX ) && ULONG_LONG_MAX > 4294967295u +# if ULONG_LONG_MAX == 18446744073709551615ull +# define BRG_UI64 +# define li_64(h) 0x##h##ull + typedef unsigned long long uint_64t; +# endif +# endif +#endif + +#if !defined( BRG_UI64 ) +# if defined( NEED_UINT_64T ) +# error Please define uint_64t as an unsigned 64 bit type in brg_types.h +# endif +#endif + +#ifndef RETURN_VALUES +# define RETURN_VALUES +# if defined( DLL_EXPORT ) +# if defined( _MSC_VER ) || defined ( __INTEL_COMPILER ) +# define VOID_RETURN __declspec( dllexport ) void __stdcall +# define INT_RETURN __declspec( dllexport ) int __stdcall +# elif defined( __GNUC__ ) +# define VOID_RETURN __declspec( __dllexport__ ) void +# define INT_RETURN __declspec( __dllexport__ ) int +# else +# error Use of the DLL is only available on the Microsoft, Intel and GCC compilers +# endif +# elif defined( DLL_IMPORT ) +# if defined( _MSC_VER ) || defined ( __INTEL_COMPILER ) +# define VOID_RETURN __declspec( dllimport ) void __stdcall +# define INT_RETURN __declspec( dllimport ) int __stdcall +# elif defined( __GNUC__ ) +# define 
VOID_RETURN __declspec( __dllimport__ ) void +# define INT_RETURN __declspec( __dllimport__ ) int +# else +# error Use of the DLL is only available on the Microsoft, Intel and GCC compilers +# endif +# elif defined( __WATCOMC__ ) +# define VOID_RETURN void __cdecl +# define INT_RETURN int __cdecl +# else +# define VOID_RETURN void +# define INT_RETURN int +# endif +#endif + +/* These defines are used to detect and set the memory alignment of pointers. + Note that offsets are in bytes. + + ALIGN_OFFSET(x,n) return the positive or zero offset of + the memory addressed by the pointer 'x' + from an address that is aligned on an + 'n' byte boundary ('n' is a power of 2) + + ALIGN_FLOOR(x,n) return a pointer that points to memory + that is aligned on an 'n' byte boundary + and is not higher than the memory address + pointed to by 'x' ('n' is a power of 2) + + ALIGN_CEIL(x,n) return a pointer that points to memory + that is aligned on an 'n' byte boundary + and is not lower than the memory address + pointed to by 'x' ('n' is a power of 2) +*/ + +#define ALIGN_OFFSET(x,n) (((ptrint_t)(x)) & ((n) - 1)) +#define ALIGN_FLOOR(x,n) ((uint_8t*)(x) - ( ((ptrint_t)(x)) & ((n) - 1))) +#define ALIGN_CEIL(x,n) ((uint_8t*)(x) + (-((ptrint_t)(x)) & ((n) - 1))) + +/* These defines are used to declare buffers in a way that allows + faster operations on longer variables to be used. In all these + defines 'size' must be a power of 2 and >= 8. NOTE that the + buffer size is in bytes but the type length is in bits + + UNIT_TYPEDEF(x,size) declares a variable 'x' of length + 'size' bits + + BUFR_TYPEDEF(x,size,bsize) declares a buffer 'x' of length 'bsize' + bytes defined as an array of variables + each of 'size' bits (bsize must be a + multiple of size / 8) + + UNIT_CAST(x,size) casts a variable to a type of + length 'size' bits + + UPTR_CAST(x,size) casts a pointer to a pointer to a + varaiable of length 'size' bits +*/ + +#define UI_TYPE(size) uint_##size##t +#define UNIT_TYPEDEF(x,size) typedef UI_TYPE(size) x +#define BUFR_TYPEDEF(x,size,bsize) typedef UI_TYPE(size) x[bsize / (size >> 3)] +#define UNIT_CAST(x,size) ((UI_TYPE(size) )(x)) +#define UPTR_CAST(x,size) ((UI_TYPE(size)*)(x)) + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/libs/libks/crypt/sha2.c b/libs/libks/crypt/sha2.c new file mode 100644 index 0000000000..5b565d0e01 --- /dev/null +++ b/libs/libks/crypt/sha2.c @@ -0,0 +1,773 @@ +/* + --------------------------------------------------------------------------- + Copyright (c) 2002, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. 
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 01/08/2005
+
+ This is a byte oriented version of SHA2 that operates on arrays of bytes
+ stored in memory. This code implements sha256, sha384 and sha512 but the
+ latter two functions rely on efficient 64-bit integer operations that
+ may not be very efficient on 32-bit machines.
+
+ The sha256 functions use a type 'sha256_ctx' to hold details of the
+ current hash state and use the following three calls:
+
+ void sha256_begin(sha256_ctx ctx[1])
+ void sha256_hash(const unsigned char data[],
+ unsigned long len, sha256_ctx ctx[1])
+ void sha256_end(unsigned char hval[], sha256_ctx ctx[1])
+
+ The first subroutine initialises a hash computation by setting up the
+ context in the sha256_ctx context. The second subroutine hashes 8-bit
+ bytes from array data[] into the hash state within the sha256_ctx context,
+ the number of bytes to be hashed being given by the unsigned long
+ integer len. The third subroutine completes the hash calculation and
+ places the resulting digest value in the array of 8-bit bytes hval[].
+
+ The sha384 and sha512 functions are similar and use the interfaces:
+
+ void sha384_begin(sha384_ctx ctx[1]);
+ void sha384_hash(const unsigned char data[],
+ unsigned long len, sha384_ctx ctx[1]);
+ void sha384_end(unsigned char hval[], sha384_ctx ctx[1]);
+
+ void sha512_begin(sha512_ctx ctx[1]);
+ void sha512_hash(const unsigned char data[],
+ unsigned long len, sha512_ctx ctx[1]);
+ void sha512_end(unsigned char hval[], sha512_ctx ctx[1]);
+
+ In addition there is a function sha2 that can be used to call all these
+ functions using a call with a hash length parameter as follows:
+
+ int sha2_begin(unsigned long len, sha2_ctx ctx[1]);
+ void sha2_hash(const unsigned char data[],
+ unsigned long len, sha2_ctx ctx[1]);
+ void sha2_end(unsigned char hval[], sha2_ctx ctx[1]);
+
+ My thanks to Erik Andersen for testing this code
+ on big-endian systems and for his assistance with corrections
+*/
+
+#if 0
+#define UNROLL_SHA2 /* for SHA2 loop unroll */
+#endif
+
+#include <string.h> /* for memcpy() etc. */
+
+#include "sha2.h"
+
+#include <stdlib.h>
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#if defined( _MSC_VER ) && ( _MSC_VER > 800 )
+#pragma intrinsic(memcpy)
+#endif
+
+#if 0 && defined(_MSC_VER)
+#define rotl32 _lrotl
+#define rotr32 _lrotr
+#else
+#define rotl32(x,n) (((x) << n) | ((x) >> (32 - n)))
+#define rotr32(x,n) (((x) >> n) | ((x) << (32 - n)))
+#endif
+
+#if !defined(bswap_32)
+#define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
+#endif
+
+#if (PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN)
+#define SWAP_BYTES
+#else
+#undef SWAP_BYTES
+#endif
+
+#if 0
+
+#define ch(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
+#define maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+#else /* Thanks to Rich Schroeppel and Colin Plumb for the following */
+
+#define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
+#define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
+
+#endif
+
+/* round transforms for SHA256 and SHA512 compression functions */
+
+#define vf(n,i) v[(n - i) & 7]
+
+#define hf(i) (p[i & 15] += \
+ g_1(p[(i + 14) & 15]) + p[(i + 9) & 15] + g_0(p[(i + 1) & 15]))
+
+#define v_cycle(i,j) \
+ vf(7,i) += (j ?
hf(i) : p[i]) + k_0[i+j] \ + + s_1(vf(4,i)) + ch(vf(4,i),vf(5,i),vf(6,i)); \ + vf(3,i) += vf(7,i); \ + vf(7,i) += s_0(vf(0,i))+ maj(vf(0,i),vf(1,i),vf(2,i)) + +#if defined(SHA_224) || defined(SHA_256) + +#define SHA256_MASK (SHA256_BLOCK_SIZE - 1) + +#if defined(SWAP_BYTES) +#define bsw_32(p,n) \ + { int _i = (n); while(_i--) ((uint_32t*)p)[_i] = bswap_32(((uint_32t*)p)[_i]); } +#else +#define bsw_32(p,n) +#endif + +#define s_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22)) +#define s_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25)) +#define g_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3)) +#define g_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10)) +#define k_0 k256 + +/* rotated SHA256 round definition. Rather than swapping variables as in */ +/* FIPS-180, different variables are 'rotated' on each round, returning */ +/* to their starting positions every eight rounds */ + +#define q(n) v##n + +#define one_cycle(a,b,c,d,e,f,g,h,k,w) \ + q(h) += s_1(q(e)) + ch(q(e), q(f), q(g)) + k + w; \ + q(d) += q(h); q(h) += s_0(q(a)) + maj(q(a), q(b), q(c)) + +/* SHA256 mixing data */ + +const uint_32t k256[64] = +{ 0x428a2f98ul, 0x71374491ul, 0xb5c0fbcful, 0xe9b5dba5ul, + 0x3956c25bul, 0x59f111f1ul, 0x923f82a4ul, 0xab1c5ed5ul, + 0xd807aa98ul, 0x12835b01ul, 0x243185beul, 0x550c7dc3ul, + 0x72be5d74ul, 0x80deb1feul, 0x9bdc06a7ul, 0xc19bf174ul, + 0xe49b69c1ul, 0xefbe4786ul, 0x0fc19dc6ul, 0x240ca1ccul, + 0x2de92c6ful, 0x4a7484aaul, 0x5cb0a9dcul, 0x76f988daul, + 0x983e5152ul, 0xa831c66dul, 0xb00327c8ul, 0xbf597fc7ul, + 0xc6e00bf3ul, 0xd5a79147ul, 0x06ca6351ul, 0x14292967ul, + 0x27b70a85ul, 0x2e1b2138ul, 0x4d2c6dfcul, 0x53380d13ul, + 0x650a7354ul, 0x766a0abbul, 0x81c2c92eul, 0x92722c85ul, + 0xa2bfe8a1ul, 0xa81a664bul, 0xc24b8b70ul, 0xc76c51a3ul, + 0xd192e819ul, 0xd6990624ul, 0xf40e3585ul, 0x106aa070ul, + 0x19a4c116ul, 0x1e376c08ul, 0x2748774cul, 0x34b0bcb5ul, + 0x391c0cb3ul, 0x4ed8aa4aul, 0x5b9cca4ful, 0x682e6ff3ul, + 0x748f82eeul, 0x78a5636ful, 0x84c87814ul, 0x8cc70208ul, + 0x90befffaul, 0xa4506cebul, 0xbef9a3f7ul, 0xc67178f2ul, +}; + +/* Compile 64 bytes of hash data into SHA256 digest value */ +/* NOTE: this routine assumes that the byte order in the */ +/* ctx->wbuf[] at this point is such that low address bytes */ +/* in the ORIGINAL byte stream will go into the high end of */ +/* words on BOTH big and little endian systems */ + +VOID_RETURN sha256_compile(sha256_ctx ctx[1]) +{ +#if !defined(UNROLL_SHA2) + + uint_32t j, *p = ctx->wbuf, v[8]; + + memcpy(v, ctx->hash, 8 * sizeof(uint_32t)); + + for(j = 0; j < 64; j += 16) + { + v_cycle( 0, j); v_cycle( 1, j); + v_cycle( 2, j); v_cycle( 3, j); + v_cycle( 4, j); v_cycle( 5, j); + v_cycle( 6, j); v_cycle( 7, j); + v_cycle( 8, j); v_cycle( 9, j); + v_cycle(10, j); v_cycle(11, j); + v_cycle(12, j); v_cycle(13, j); + v_cycle(14, j); v_cycle(15, j); + } + + ctx->hash[0] += v[0]; ctx->hash[1] += v[1]; + ctx->hash[2] += v[2]; ctx->hash[3] += v[3]; + ctx->hash[4] += v[4]; ctx->hash[5] += v[5]; + ctx->hash[6] += v[6]; ctx->hash[7] += v[7]; + +#else + + uint_32t *p = ctx->wbuf,v0,v1,v2,v3,v4,v5,v6,v7; + + v0 = ctx->hash[0]; v1 = ctx->hash[1]; + v2 = ctx->hash[2]; v3 = ctx->hash[3]; + v4 = ctx->hash[4]; v5 = ctx->hash[5]; + v6 = ctx->hash[6]; v7 = ctx->hash[7]; + + one_cycle(0,1,2,3,4,5,6,7,k256[ 0],p[ 0]); + one_cycle(7,0,1,2,3,4,5,6,k256[ 1],p[ 1]); + one_cycle(6,7,0,1,2,3,4,5,k256[ 2],p[ 2]); + one_cycle(5,6,7,0,1,2,3,4,k256[ 3],p[ 3]); + one_cycle(4,5,6,7,0,1,2,3,k256[ 4],p[ 4]); + one_cycle(3,4,5,6,7,0,1,2,k256[ 5],p[ 5]); + 
one_cycle(2,3,4,5,6,7,0,1,k256[ 6],p[ 6]); + one_cycle(1,2,3,4,5,6,7,0,k256[ 7],p[ 7]); + one_cycle(0,1,2,3,4,5,6,7,k256[ 8],p[ 8]); + one_cycle(7,0,1,2,3,4,5,6,k256[ 9],p[ 9]); + one_cycle(6,7,0,1,2,3,4,5,k256[10],p[10]); + one_cycle(5,6,7,0,1,2,3,4,k256[11],p[11]); + one_cycle(4,5,6,7,0,1,2,3,k256[12],p[12]); + one_cycle(3,4,5,6,7,0,1,2,k256[13],p[13]); + one_cycle(2,3,4,5,6,7,0,1,k256[14],p[14]); + one_cycle(1,2,3,4,5,6,7,0,k256[15],p[15]); + + one_cycle(0,1,2,3,4,5,6,7,k256[16],hf( 0)); + one_cycle(7,0,1,2,3,4,5,6,k256[17],hf( 1)); + one_cycle(6,7,0,1,2,3,4,5,k256[18],hf( 2)); + one_cycle(5,6,7,0,1,2,3,4,k256[19],hf( 3)); + one_cycle(4,5,6,7,0,1,2,3,k256[20],hf( 4)); + one_cycle(3,4,5,6,7,0,1,2,k256[21],hf( 5)); + one_cycle(2,3,4,5,6,7,0,1,k256[22],hf( 6)); + one_cycle(1,2,3,4,5,6,7,0,k256[23],hf( 7)); + one_cycle(0,1,2,3,4,5,6,7,k256[24],hf( 8)); + one_cycle(7,0,1,2,3,4,5,6,k256[25],hf( 9)); + one_cycle(6,7,0,1,2,3,4,5,k256[26],hf(10)); + one_cycle(5,6,7,0,1,2,3,4,k256[27],hf(11)); + one_cycle(4,5,6,7,0,1,2,3,k256[28],hf(12)); + one_cycle(3,4,5,6,7,0,1,2,k256[29],hf(13)); + one_cycle(2,3,4,5,6,7,0,1,k256[30],hf(14)); + one_cycle(1,2,3,4,5,6,7,0,k256[31],hf(15)); + + one_cycle(0,1,2,3,4,5,6,7,k256[32],hf( 0)); + one_cycle(7,0,1,2,3,4,5,6,k256[33],hf( 1)); + one_cycle(6,7,0,1,2,3,4,5,k256[34],hf( 2)); + one_cycle(5,6,7,0,1,2,3,4,k256[35],hf( 3)); + one_cycle(4,5,6,7,0,1,2,3,k256[36],hf( 4)); + one_cycle(3,4,5,6,7,0,1,2,k256[37],hf( 5)); + one_cycle(2,3,4,5,6,7,0,1,k256[38],hf( 6)); + one_cycle(1,2,3,4,5,6,7,0,k256[39],hf( 7)); + one_cycle(0,1,2,3,4,5,6,7,k256[40],hf( 8)); + one_cycle(7,0,1,2,3,4,5,6,k256[41],hf( 9)); + one_cycle(6,7,0,1,2,3,4,5,k256[42],hf(10)); + one_cycle(5,6,7,0,1,2,3,4,k256[43],hf(11)); + one_cycle(4,5,6,7,0,1,2,3,k256[44],hf(12)); + one_cycle(3,4,5,6,7,0,1,2,k256[45],hf(13)); + one_cycle(2,3,4,5,6,7,0,1,k256[46],hf(14)); + one_cycle(1,2,3,4,5,6,7,0,k256[47],hf(15)); + + one_cycle(0,1,2,3,4,5,6,7,k256[48],hf( 0)); + one_cycle(7,0,1,2,3,4,5,6,k256[49],hf( 1)); + one_cycle(6,7,0,1,2,3,4,5,k256[50],hf( 2)); + one_cycle(5,6,7,0,1,2,3,4,k256[51],hf( 3)); + one_cycle(4,5,6,7,0,1,2,3,k256[52],hf( 4)); + one_cycle(3,4,5,6,7,0,1,2,k256[53],hf( 5)); + one_cycle(2,3,4,5,6,7,0,1,k256[54],hf( 6)); + one_cycle(1,2,3,4,5,6,7,0,k256[55],hf( 7)); + one_cycle(0,1,2,3,4,5,6,7,k256[56],hf( 8)); + one_cycle(7,0,1,2,3,4,5,6,k256[57],hf( 9)); + one_cycle(6,7,0,1,2,3,4,5,k256[58],hf(10)); + one_cycle(5,6,7,0,1,2,3,4,k256[59],hf(11)); + one_cycle(4,5,6,7,0,1,2,3,k256[60],hf(12)); + one_cycle(3,4,5,6,7,0,1,2,k256[61],hf(13)); + one_cycle(2,3,4,5,6,7,0,1,k256[62],hf(14)); + one_cycle(1,2,3,4,5,6,7,0,k256[63],hf(15)); + + ctx->hash[0] += v0; ctx->hash[1] += v1; + ctx->hash[2] += v2; ctx->hash[3] += v3; + ctx->hash[4] += v4; ctx->hash[5] += v5; + ctx->hash[6] += v6; ctx->hash[7] += v7; +#endif +} + +/* SHA256 hash data in an array of bytes into hash buffer */ +/* and call the hash_compile function as required. 
*/ + +VOID_RETURN sha256_hash(const unsigned char data[], unsigned long len, sha256_ctx ctx[1]) +{ uint_32t pos = (uint_32t)(ctx->count[0] & SHA256_MASK), + space = SHA256_BLOCK_SIZE - pos; + const unsigned char *sp = data; + + if((ctx->count[0] += len) < len) + ++(ctx->count[1]); + + while(len >= space) /* tranfer whole blocks while possible */ + { + memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space); + sp += space; len -= space; space = SHA256_BLOCK_SIZE; pos = 0; + bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2) + sha256_compile(ctx); + } + + memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len); +} + +/* SHA256 Final padding and digest calculation */ + +static void sha_end1(unsigned char hval[], sha256_ctx ctx[1], const unsigned int hlen) +{ uint_32t i = (uint_32t)(ctx->count[0] & SHA256_MASK); + + /* put bytes in the buffer in an order in which references to */ + /* 32-bit words will put bytes with lower addresses into the */ + /* top of 32 bit words on BOTH big and little endian machines */ + bsw_32(ctx->wbuf, (i + 3) >> 2) + + /* we now need to mask valid bytes and add the padding which is */ + /* a single 1 bit and as many zero bits as necessary. Note that */ + /* we can always add the first padding byte here because the */ + /* buffer always has at least one empty slot */ + ctx->wbuf[i >> 2] &= 0xffffff80 << 8 * (~i & 3); + ctx->wbuf[i >> 2] |= 0x00000080 << 8 * (~i & 3); + + /* we need 9 or more empty positions, one for the padding byte */ + /* (above) and eight for the length count. If there is not */ + /* enough space pad and empty the buffer */ + if(i > SHA256_BLOCK_SIZE - 9) + { + if(i < 60) ctx->wbuf[15] = 0; + sha256_compile(ctx); + i = 0; + } + else /* compute a word index for the empty buffer positions */ + i = (i >> 2) + 1; + + while(i < 14) /* and zero pad all but last two positions */ + ctx->wbuf[i++] = 0; + + /* the following 32-bit length fields are assembled in the */ + /* wrong byte order on little endian machines but this is */ + /* corrected later since they are only ever used as 32-bit */ + /* word values. 
*/ + ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29); + ctx->wbuf[15] = ctx->count[0] << 3; + sha256_compile(ctx); + + /* extract the hash value as bytes in case the hash buffer is */ + /* mislaigned for 32-bit words */ + for(i = 0; i < hlen; ++i) + hval[i] = (unsigned char)(ctx->hash[i >> 2] >> (8 * (~i & 3))); +} + +#endif + +#if defined(SHA_224) + +const uint_32t i224[8] = +{ + 0xc1059ed8ul, 0x367cd507ul, 0x3070dd17ul, 0xf70e5939ul, + 0xffc00b31ul, 0x68581511ul, 0x64f98fa7ul, 0xbefa4fa4ul +}; + +VOID_RETURN sha224_begin(sha224_ctx ctx[1]) +{ + ctx->count[0] = ctx->count[1] = 0; + memcpy(ctx->hash, i224, 8 * sizeof(uint_32t)); +} + +VOID_RETURN sha224_end(unsigned char hval[], sha224_ctx ctx[1]) +{ + sha_end1(hval, ctx, SHA224_DIGEST_SIZE); +} + +VOID_RETURN sha224_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len) +{ sha224_ctx cx[1]; + + sha224_begin(cx); + sha224_hash(data, len, cx); + sha_end1(hval, cx, SHA224_DIGEST_SIZE); +} + +#endif + +#if defined(SHA_256) + +const uint_32t i256[8] = +{ + 0x6a09e667ul, 0xbb67ae85ul, 0x3c6ef372ul, 0xa54ff53aul, + 0x510e527ful, 0x9b05688cul, 0x1f83d9abul, 0x5be0cd19ul +}; + +VOID_RETURN sha256_begin(sha256_ctx ctx[1]) +{ + ctx->count[0] = ctx->count[1] = 0; + memcpy(ctx->hash, i256, 8 * sizeof(uint_32t)); +} + +VOID_RETURN sha256_end(unsigned char hval[], sha256_ctx ctx[1]) +{ + sha_end1(hval, ctx, SHA256_DIGEST_SIZE); +} + +VOID_RETURN sha256_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len) +{ sha256_ctx cx[1]; + + sha256_begin(cx); + sha256_hash(data, len, cx); + sha_end1(hval, cx, SHA256_DIGEST_SIZE); +} + +#endif + +#if defined(SHA_384) || defined(SHA_512) + +#define SHA512_MASK (SHA512_BLOCK_SIZE - 1) + +#define rotr64(x,n) (((x) >> n) | ((x) << (64 - n))) + +#if !defined(bswap_64) +#define bswap_64(x) (((uint_64t)(bswap_32((uint_32t)(x)))) << 32 | bswap_32((uint_32t)((x) >> 32))) +#endif + +#if defined(SWAP_BYTES) +#define bsw_64(p,n) \ + { int _i = (n); while(_i--) ((uint_64t*)p)[_i] = bswap_64(((uint_64t*)p)[_i]); } +#else +#define bsw_64(p,n) +#endif + +/* SHA512 mixing function definitions */ + +#ifdef s_0 +# undef s_0 +# undef s_1 +# undef g_0 +# undef g_1 +# undef k_0 +#endif + +#define s_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39)) +#define s_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41)) +#define g_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7)) +#define g_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6)) +#define k_0 k512 + +/* SHA384/SHA512 mixing data */ + +const uint_64t k512[80] = +{ + li_64(428a2f98d728ae22), li_64(7137449123ef65cd), + li_64(b5c0fbcfec4d3b2f), li_64(e9b5dba58189dbbc), + li_64(3956c25bf348b538), li_64(59f111f1b605d019), + li_64(923f82a4af194f9b), li_64(ab1c5ed5da6d8118), + li_64(d807aa98a3030242), li_64(12835b0145706fbe), + li_64(243185be4ee4b28c), li_64(550c7dc3d5ffb4e2), + li_64(72be5d74f27b896f), li_64(80deb1fe3b1696b1), + li_64(9bdc06a725c71235), li_64(c19bf174cf692694), + li_64(e49b69c19ef14ad2), li_64(efbe4786384f25e3), + li_64(0fc19dc68b8cd5b5), li_64(240ca1cc77ac9c65), + li_64(2de92c6f592b0275), li_64(4a7484aa6ea6e483), + li_64(5cb0a9dcbd41fbd4), li_64(76f988da831153b5), + li_64(983e5152ee66dfab), li_64(a831c66d2db43210), + li_64(b00327c898fb213f), li_64(bf597fc7beef0ee4), + li_64(c6e00bf33da88fc2), li_64(d5a79147930aa725), + li_64(06ca6351e003826f), li_64(142929670a0e6e70), + li_64(27b70a8546d22ffc), li_64(2e1b21385c26c926), + li_64(4d2c6dfc5ac42aed), li_64(53380d139d95b3df), + li_64(650a73548baf63de), 
li_64(766a0abb3c77b2a8), + li_64(81c2c92e47edaee6), li_64(92722c851482353b), + li_64(a2bfe8a14cf10364), li_64(a81a664bbc423001), + li_64(c24b8b70d0f89791), li_64(c76c51a30654be30), + li_64(d192e819d6ef5218), li_64(d69906245565a910), + li_64(f40e35855771202a), li_64(106aa07032bbd1b8), + li_64(19a4c116b8d2d0c8), li_64(1e376c085141ab53), + li_64(2748774cdf8eeb99), li_64(34b0bcb5e19b48a8), + li_64(391c0cb3c5c95a63), li_64(4ed8aa4ae3418acb), + li_64(5b9cca4f7763e373), li_64(682e6ff3d6b2b8a3), + li_64(748f82ee5defb2fc), li_64(78a5636f43172f60), + li_64(84c87814a1f0ab72), li_64(8cc702081a6439ec), + li_64(90befffa23631e28), li_64(a4506cebde82bde9), + li_64(bef9a3f7b2c67915), li_64(c67178f2e372532b), + li_64(ca273eceea26619c), li_64(d186b8c721c0c207), + li_64(eada7dd6cde0eb1e), li_64(f57d4f7fee6ed178), + li_64(06f067aa72176fba), li_64(0a637dc5a2c898a6), + li_64(113f9804bef90dae), li_64(1b710b35131c471b), + li_64(28db77f523047d84), li_64(32caab7b40c72493), + li_64(3c9ebe0a15c9bebc), li_64(431d67c49c100d4c), + li_64(4cc5d4becb3e42b6), li_64(597f299cfc657e2a), + li_64(5fcb6fab3ad6faec), li_64(6c44198c4a475817) +}; + +/* Compile 128 bytes of hash data into SHA384/512 digest */ +/* NOTE: this routine assumes that the byte order in the */ +/* ctx->wbuf[] at this point is such that low address bytes */ +/* in the ORIGINAL byte stream will go into the high end of */ +/* words on BOTH big and little endian systems */ + +VOID_RETURN sha512_compile(sha512_ctx ctx[1]) +{ uint_64t v[8], *p = ctx->wbuf; + uint_32t j; + + memcpy(v, ctx->hash, 8 * sizeof(uint_64t)); + + for(j = 0; j < 80; j += 16) + { + v_cycle( 0, j); v_cycle( 1, j); + v_cycle( 2, j); v_cycle( 3, j); + v_cycle( 4, j); v_cycle( 5, j); + v_cycle( 6, j); v_cycle( 7, j); + v_cycle( 8, j); v_cycle( 9, j); + v_cycle(10, j); v_cycle(11, j); + v_cycle(12, j); v_cycle(13, j); + v_cycle(14, j); v_cycle(15, j); + } + + ctx->hash[0] += v[0]; ctx->hash[1] += v[1]; + ctx->hash[2] += v[2]; ctx->hash[3] += v[3]; + ctx->hash[4] += v[4]; ctx->hash[5] += v[5]; + ctx->hash[6] += v[6]; ctx->hash[7] += v[7]; +} + +/* Compile 128 bytes of hash data into SHA256 digest value */ +/* NOTE: this routine assumes that the byte order in the */ +/* ctx->wbuf[] at this point is in such an order that low */ +/* address bytes in the ORIGINAL byte stream placed in this */ +/* buffer will now go to the high end of words on BOTH big */ +/* and little endian systems */ + +VOID_RETURN sha512_hash(const unsigned char data[], unsigned long len, sha512_ctx ctx[1]) +{ uint_32t pos = (uint_32t)(ctx->count[0] & SHA512_MASK), + space = SHA512_BLOCK_SIZE - pos; + const unsigned char *sp = data; + + if((ctx->count[0] += len) < len) + ++(ctx->count[1]); + + while(len >= space) /* tranfer whole blocks while possible */ + { + memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space); + sp += space; len -= space; space = SHA512_BLOCK_SIZE; pos = 0; + bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3); + sha512_compile(ctx); + } + + memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len); +} + +/* SHA384/512 Final padding and digest calculation */ + +static void sha_end2(unsigned char hval[], sha512_ctx ctx[1], const unsigned int hlen) +{ uint_32t i = (uint_32t)(ctx->count[0] & SHA512_MASK); + + /* put bytes in the buffer in an order in which references to */ + /* 32-bit words will put bytes with lower addresses into the */ + /* top of 32 bit words on BOTH big and little endian machines */ + bsw_64(ctx->wbuf, (i + 7) >> 3); + + /* we now need to mask valid bytes and add the padding which is */ + /* a single 1 bit and 
as many zero bits as necessary. Note that */ + /* we can always add the first padding byte here because the */ + /* buffer always has at least one empty slot */ + ctx->wbuf[i >> 3] &= li_64(ffffffffffffff00) << 8 * (~i & 7); + ctx->wbuf[i >> 3] |= li_64(0000000000000080) << 8 * (~i & 7); + + /* we need 17 or more empty byte positions, one for the padding */ + /* byte (above) and sixteen for the length count. If there is */ + /* not enough space pad and empty the buffer */ + if(i > SHA512_BLOCK_SIZE - 17) + { + if(i < 120) ctx->wbuf[15] = 0; + sha512_compile(ctx); + i = 0; + } + else + i = (i >> 3) + 1; + + while(i < 14) + ctx->wbuf[i++] = 0; + + /* the following 64-bit length fields are assembled in the */ + /* wrong byte order on little endian machines but this is */ + /* corrected later since they are only ever used as 64-bit */ + /* word values. */ + ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61); + ctx->wbuf[15] = ctx->count[0] << 3; + sha512_compile(ctx); + + /* extract the hash value as bytes in case the hash buffer is */ + /* misaligned for 32-bit words */ + for(i = 0; i < hlen; ++i) + hval[i] = (unsigned char)(ctx->hash[i >> 3] >> (8 * (~i & 7))); +} + +#endif + +#if defined(SHA_384) + +/* SHA384 initialisation data */ + +const uint_64t i384[80] = +{ + li_64(cbbb9d5dc1059ed8), li_64(629a292a367cd507), + li_64(9159015a3070dd17), li_64(152fecd8f70e5939), + li_64(67332667ffc00b31), li_64(8eb44a8768581511), + li_64(db0c2e0d64f98fa7), li_64(47b5481dbefa4fa4) +}; + +VOID_RETURN sha384_begin(sha384_ctx ctx[1]) +{ + ctx->count[0] = ctx->count[1] = 0; + memcpy(ctx->hash, i384, 8 * sizeof(uint_64t)); +} + +VOID_RETURN sha384_end(unsigned char hval[], sha384_ctx ctx[1]) +{ + sha_end2(hval, ctx, SHA384_DIGEST_SIZE); +} + +VOID_RETURN sha384_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len) +{ sha384_ctx cx[1]; + + sha384_begin(cx); + sha384_hash(data, len, cx); + sha_end2(hval, cx, SHA384_DIGEST_SIZE); +} + +#endif + +#if defined(SHA_512) + +/* SHA512 initialisation data */ + +const uint_64t i512[80] = +{ + li_64(6a09e667f3bcc908), li_64(bb67ae8584caa73b), + li_64(3c6ef372fe94f82b), li_64(a54ff53a5f1d36f1), + li_64(510e527fade682d1), li_64(9b05688c2b3e6c1f), + li_64(1f83d9abfb41bd6b), li_64(5be0cd19137e2179) +}; + +VOID_RETURN sha512_begin(sha512_ctx ctx[1]) +{ + ctx->count[0] = ctx->count[1] = 0; + memcpy(ctx->hash, i512, 8 * sizeof(uint_64t)); +} + +VOID_RETURN sha512_end(unsigned char hval[], sha512_ctx ctx[1]) +{ + sha_end2(hval, ctx, SHA512_DIGEST_SIZE); +} + +VOID_RETURN sha512_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len) +{ sha512_ctx cx[1]; + + sha512_begin(cx); + sha512_hash(data, len, cx); + sha_end2(hval, cx, SHA512_DIGEST_SIZE); +} + +#endif + +#if defined(SHA_2) + +#define CTX_224(x) ((x)->uu->ctx256) +#define CTX_256(x) ((x)->uu->ctx256) +#define CTX_384(x) ((x)->uu->ctx512) +#define CTX_512(x) ((x)->uu->ctx512) + +/* SHA2 initialisation */ + +INT_RETURN sha2_begin(unsigned long len, sha2_ctx ctx[1]) +{ + switch(len) + { +#if defined(SHA_224) + case 224: + case 28: CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0; + memcpy(CTX_256(ctx)->hash, i224, 32); + ctx->sha2_len = 28; return EXIT_SUCCESS; +#endif +#if defined(SHA_256) + case 256: + case 32: CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0; + memcpy(CTX_256(ctx)->hash, i256, 32); + ctx->sha2_len = 32; return EXIT_SUCCESS; +#endif +#if defined(SHA_384) + case 384: + case 48: CTX_384(ctx)->count[0] = CTX_384(ctx)->count[1] = 0; + 
memcpy(CTX_384(ctx)->hash, i384, 64); + ctx->sha2_len = 48; return EXIT_SUCCESS; +#endif +#if defined(SHA_512) + case 512: + case 64: CTX_512(ctx)->count[0] = CTX_512(ctx)->count[1] = 0; + memcpy(CTX_512(ctx)->hash, i512, 64); + ctx->sha2_len = 64; return EXIT_SUCCESS; +#endif + default: return EXIT_FAILURE; + } +} + +VOID_RETURN sha2_hash(const unsigned char data[], unsigned long len, sha2_ctx ctx[1]) +{ + switch(ctx->sha2_len) + { +#if defined(SHA_224) + case 28: sha224_hash(data, len, CTX_224(ctx)); return; +#endif +#if defined(SHA_256) + case 32: sha256_hash(data, len, CTX_256(ctx)); return; +#endif +#if defined(SHA_384) + case 48: sha384_hash(data, len, CTX_384(ctx)); return; +#endif +#if defined(SHA_512) + case 64: sha512_hash(data, len, CTX_512(ctx)); return; +#endif + } +} + +VOID_RETURN sha2_end(unsigned char hval[], sha2_ctx ctx[1]) +{ + switch(ctx->sha2_len) + { +#if defined(SHA_224) + case 28: sha_end1(hval, CTX_224(ctx), SHA224_DIGEST_SIZE); return; +#endif +#if defined(SHA_256) + case 32: sha_end1(hval, CTX_256(ctx), SHA256_DIGEST_SIZE); return; +#endif +#if defined(SHA_384) + case 48: sha_end2(hval, CTX_384(ctx), SHA384_DIGEST_SIZE); return; +#endif +#if defined(SHA_512) + case 64: sha_end2(hval, CTX_512(ctx), SHA512_DIGEST_SIZE); return; +#endif + } +} + +INT_RETURN sha2_all(unsigned char hval[], unsigned long size, + const unsigned char data[], unsigned long len) +{ sha2_ctx cx[1]; + + if(sha2_begin(size, cx) == EXIT_SUCCESS) + { + sha2_hash(data, len, cx); sha2_end(hval, cx); return EXIT_SUCCESS; + } + else + return EXIT_FAILURE; +} + +#endif + +#if defined(__cplusplus) +} +#endif diff --git a/libs/libks/crypt/sha2.h b/libs/libks/crypt/sha2.h new file mode 100644 index 0000000000..873cda7764 --- /dev/null +++ b/libs/libks/crypt/sha2.h @@ -0,0 +1,151 @@ +/* + --------------------------------------------------------------------------- + Copyright (c) 2002, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. 
+ ---------------------------------------------------------------------------
+ Issue Date: 01/08/2005
+*/
+
+#ifndef _SHA2_H
+#define _SHA2_H
+
+#include <stdlib.h>
+
+#define SHA_64BIT
+
+/* define the hash functions that you need */
+#define SHA_2 /* for dynamic hash length */
+#define SHA_224
+#define SHA_256
+#ifdef SHA_64BIT
+# define SHA_384
+# define SHA_512
+# define NEED_UINT_64T
+#endif
+
+#include "brg_types.h"
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+/* Note that the following function prototypes are the same */
+/* for both the bit and byte oriented implementations. But */
+/* the length fields are in bytes or bits as is appropriate */
+/* for the version used. Bit sequences are arrays of bytes */
+/* in which bit sequence indexes increase from the most to */
+/* the least significant end of each byte */
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+
+/* type to hold the SHA256 (and SHA224) context */
+
+typedef struct
+{ uint_32t count[2];
+ uint_32t hash[8];
+ uint_32t wbuf[16];
+} sha256_ctx;
+
+typedef sha256_ctx sha224_ctx;
+
+VOID_RETURN sha256_compile(sha256_ctx ctx[1]);
+
+VOID_RETURN sha224_begin(sha224_ctx ctx[1]);
+#define sha224_hash sha256_hash
+VOID_RETURN sha224_end(unsigned char hval[], sha224_ctx ctx[1]);
+VOID_RETURN sha224_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len);
+
+VOID_RETURN sha256_begin(sha256_ctx ctx[1]);
+VOID_RETURN sha256_hash(const unsigned char data[], unsigned long len, sha256_ctx ctx[1]);
+VOID_RETURN sha256_end(unsigned char hval[], sha256_ctx ctx[1]);
+VOID_RETURN sha256_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len);
+
+#ifndef SHA_64BIT
+
+typedef struct
+{ union
+ { sha256_ctx ctx256[1];
+ } uu[1];
+ uint_32t sha2_len;
+} sha2_ctx;
+
+#define SHA2_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE
+
+#else
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* type to hold the SHA384 (and SHA512) context */
+
+typedef struct
+{ uint_64t count[2];
+ uint_64t hash[8];
+ uint_64t wbuf[16];
+} sha512_ctx;
+
+typedef sha512_ctx sha384_ctx;
+
+typedef struct
+{ union
+ { sha256_ctx ctx256[1];
+ sha512_ctx ctx512[1];
+ } uu[1];
+ uint_32t sha2_len;
+} sha2_ctx;
+
+VOID_RETURN sha512_compile(sha512_ctx ctx[1]);
+
+VOID_RETURN sha384_begin(sha384_ctx ctx[1]);
+#define sha384_hash sha512_hash
+VOID_RETURN sha384_end(unsigned char hval[], sha384_ctx ctx[1]);
+VOID_RETURN sha384_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len);
+
+VOID_RETURN sha512_begin(sha512_ctx ctx[1]);
+VOID_RETURN sha512_hash(const unsigned char data[], unsigned long len, sha512_ctx ctx[1]);
+VOID_RETURN sha512_end(unsigned char hval[], sha512_ctx ctx[1]);
+VOID_RETURN sha512_zrtp(unsigned char hval[], const unsigned char data[], unsigned long len);
+
+INT_RETURN sha2_begin(unsigned long size, sha2_ctx ctx[1]);
+VOID_RETURN sha2_hash(const unsigned char data[], unsigned long len, sha2_ctx ctx[1]);
+VOID_RETURN sha2_end(unsigned char hval[], sha2_ctx ctx[1]);
+INT_RETURN sha2_all(unsigned char hval[], unsigned long size, const unsigned char data[], unsigned long len);
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/libs/libks/crypt/twofish.c b/libs/libks/crypt/twofish.c
new file mode 100644
index 0000000000..45a07e2b22
--- /dev/null
+++ b/libs/libks/crypt/twofish.c
@@ -0,0 +1,1750 @@
+/*
+ * Fast, portable, and easy-to-use Twofish implementation,
+ * Version 0.3.
+ * Copyright (c) 2002 by Niels Ferguson.
+ * (See further down for the almost-unrestricted licensing terms.)
+ *
+ * --------------------------------------------------------------------------
+ * There are two files for this implementation:
+ * - twofish.h, the header file.
+ * - twofish.c, the code file.
+ *
+ * To incorporate this code into your program you should:
+ * - Check the licensing terms further down in this comment.
+ * - Fix the two type definitions in twofish.h to suit your platform.
+ * - Fix a few definitions in twofish.c in the section marked
+ * PLATFORM FIXES. There is one important one that affects
+ * functionality, and then a few definitions that you can optimise
+ * for efficiency but those have no effect on the functionality.
+ * Don't change anything else.
+ * - Put the code in your project and compile it.
+ *
+ * To use this library you should:
+ * - Call Twofish_initialise() in your program before any other function in
+ * this library.
+ * - Use Twofish_prepare_key(...) to convert a key to internal form.
+ * - Use Twofish_encrypt(...) and Twofish_decrypt(...) to encrypt and decrypt
+ * data.
+ * See the comments in the header file for details on these functions.
+ * (A short usage sketch also appears further down in this comment.)
+ * --------------------------------------------------------------------------
+ *
+ * There are many Twofish implementations available for free on the web.
+ * Most of them are hard to integrate into your own program.
+ * As we like people to use our cipher, I thought I would make it easier.
+ * Here is a free and easy-to-integrate Twofish implementation in C.
+ * The latest version is always available from my personal home page at
+ * http://niels.ferguson.net/
+ *
+ * Integrating library code into a project is difficult because the library
+ * header files interfere with the project's header files and code.
+ * And of course the project's header files interfere with the library code.
+ * I've tried to resolve these problems here.
+ * The header file of this implementation is very light-weight.
+ * It contains two typedefs, a structure, and a few function declarations.
+ * All names it defines start with "Twofish_".
+ * The header file is therefore unlikely to cause problems in your project.
+ * The code file of this implementation doesn't need to include the header
+ * files of the project. There is thus no danger of the project interfering
+ * with all the definitions and macros of the Twofish code.
+ * In most situations, all you need to do is fill in a few platform-specific
+ * definitions in the header file and code file,
+ * and you should be able to run the Twofish code in your project.
+ * I estimate it should take you less than an hour to integrate this code
+ * into your project, most of it spent reading the comments telling you what
+ * to do.
+ *
+ * For people using C++: it is very easy to wrap this library into a
+ * TwofishKey class. One of the big advantages is that you can automate the
+ * wiping of the key material in the destructor. I have not provided a C++
+ * class because the interface depends too much on the abstract base class
+ * you use for block ciphers in your program, which I don't know about.
+ *
+ * This implementation is designed for use on PC-class machines. It uses the
+ * Twofish 'full' keying option which uses large tables. Total table size is
+ * around 5-6 kB for static tables plus 4.5 kB for each pre-processed key.
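+ *
+ * In exchange for that memory, day-to-day use stays simple. A minimal usage
+ * sketch (error handling abbreviated; handle_error() is a placeholder for
+ * whatever your program does on failure, and key material should be wiped
+ * when you are done with it):
+ *
+ *     Twofish_Byte key[16];              16, 24, or 32 key bytes, set by you
+ *     Twofish_Byte pt[16], ct[16];       one 16-byte block
+ *     Twofish_key  xkey;                 the expanded key
+ *
+ *     Twofish_initialise();              once at start-up; runs the self-test
+ *     if( Twofish_prepare_key( key, sizeof( key ), &xkey ) < 0 )
+ *         handle_error();
+ *     Twofish_encrypt( &xkey, pt, ct );  encrypt one 16-byte block
+ *     Twofish_decrypt( &xkey, ct, pt );  and decrypt it again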
+ * If you need an implementation that uses less memory, + * take a look at Brian Gladman's code on his web site: + * http://fp.gladman.plus.com/cryptography_technology/aes/ + * He has code for all AES candidates. + * His Twofish code has lots of options trading off table size vs. speed. + * You can also take a look at the optimised code by Doug Whiting on the + * Twofish web site + * http://www.counterpane.com/twofish.html + * which has loads of options. + * I believe these existing implementations are harder to re-use because they + * are not clean libraries and they impose requirements on the environment. + * This implementation is very careful to minimise those, + * and should be easier to integrate into any larger program. + * + * The default mode of this implementation is fully portable as it uses no + * behaviour not defined in the C standard. (This is harder than you think.) + * If you have any problems porting the default mode, please let me know + * so that I can fix the problem. (But only if this code is at fault, I + * don't fix compilers.) + * Most of the platform fixes are related to non-portable but faster ways + * of implementing certain functions. + * + * In general I've tried to make the code as fast as possible, at the expense + * of memory and code size. However, C does impose limits, and this + * implementation will be slower than an optimised assembler implementation. + * But beware of assembler implementations: a good Pentium implementation + * uses completely different code than a good Pentium II implementation. + * You basically have to re-write the assembly code for every generation of + * processor. Unless you are severely pressed for speed, stick with C. + * + * The initialisation routine of this implementation contains a self-test. + * If initialisation succeeds without calling the fatal routine, then + * the implementation works. I don't think you can break the implementation + * in such a way that it still passes the tests, unless you are malicious. + * In other words: if the initialisation routine returns, + * you have successfully ported the implementation. + * (Or not implemented the fatal routine properly, but that is your problem.) + * + * I'm indebted to many people who helped me in one way or another to write + * this code. During the design of Twofish and the AES process I had very + * extensive discussions of all implementation issues with various people. + * Doug Whiting in particular provided a wealth of information. The Twofish + * team spent untold hours discussion various cipher features, and their + * implementation. Brian Gladman implemented all AES candidates in C, + * and we had some fruitful discussions on how to implement Twofish in C. + * Jan Nieuwenhuizen tested this code on Linux using GCC. + * + * Now for the license: + * The author hereby grants a perpetual license to everybody to + * use this code for any purpose as long as the copyright message is included + * in the source code of this or any derived work. + * + * Yes, this means that you, your company, your club, and anyone else + * can use this code anywhere you want. You can change it and distribute it + * under the GPL, include it in your commercial product without releasing + * the source code, put it on the web, etc. + * The only thing you cannot do is remove my copyright message, + * or distribute any source code based on this implementation that does not + * include my copyright message. 
+ *
+ * I appreciate a mention in the documentation or credits,
+ * but I understand if that is difficult to do.
+ * I also appreciate it if you tell me where and why you used my code.
+ *
+ * Please send any questions or comments to niels@ferguson.net
+ *
+ * Have Fun!
+ *
+ * Niels
+ */
+
+/*
+ * DISCLAIMER: As I'm giving away my work for free, I'm of course not going
+ * to accept any liability of any form. This code, or the Twofish cipher,
+ * might very well be flawed; you have been warned.
+ * This software is provided as-is, without any kind of warranty or
+ * guarantee. And that is really all you can expect when you download
+ * code for free from the Internet.
+ *
+ * I think it is really sad that disclaimers like this seem to be necessary.
+ * If people only had a little bit more common sense, and didn't come
+ * whining like little children every time something happens....
+ */
+
+/*
+ * Version history:
+ * Version 0.0, 2002-08-30
+ * First written.
+ * Version 0.1, 2002-09-03
+ * Added disclaimer. Improved self-tests.
+ * Version 0.2, 2002-09-09
+ * Removed last non-portabilities. Default now works completely within
+ * the C standard. UInt32 can be larger than 32 bits without problems.
+ * Version 0.3, 2002-09-28
+ * Bugfix: use <string.h> instead of <memory.h> to adhere to ANSI/ISO.
+ * Rename BIG_ENDIAN macro to CPU_IS_BIG_ENDIAN. The gcc library
+ * header <string.h> already defines BIG_ENDIAN, even though it is not
+ * supposed to.
+ */
+
+
+/*
+ * Minimum set of include files.
+ * You should not need any application-specific include files for this code.
+ * In fact, adding your own header files could break one of the many macros or
+ * functions in this file. Be very careful.
+ * Standard include files will probably be ok.
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+/* #include <string.h> * for memset(), memcpy(), and memcmp() */
+#include "twofish.h"
+
+
+/*
+ * PLATFORM FIXES
+ * ==============
+ *
+ * Fix the type definitions in twofish.h first!
+ *
+ * The following definitions have to be fixed for each particular platform
+ * you work on. If you have a multi-platform program, you no doubt have
+ * portable definitions that you can substitute here without changing the
+ * rest of the code.
+ */
+
+
+/*
+ * Function called if something is fatally wrong with the implementation.
+ * This fatal function is called when a coding error is detected in the
+ * Twofish implementation, or when somebody passes an obviously erroneous
+ * parameter to this implementation. There is not much you can do when
+ * the code contains bugs, so we just stop.
+ *
+ * The argument is a string. Ideally the fatal function prints this string
+ * as an error message. Whatever else this function does, it should never
+ * return. A typical implementation would stop the program completely after
+ * printing the error message.
+ *
+ * This default implementation is not very useful,
+ * but does not assume anything about your environment.
+ * It will at least let you know something is wrong....
+ * I didn't want to include any libraries to print an error or so,
+ * as this makes the code much harder to integrate in a project.
+ *
+ * Note that the Twofish_fatal function may not return to the caller.
+ * Unfortunately this is not something the self-test can test for,
+ * so you have to make sure of this yourself.
+ *
+ * If you want to call an external function, be careful about including
+ * your own header files here. This code uses a lot of macros, and your
+ * header file could easily break it.
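+ * One way to do that while keeping this file self-contained is a bare
+ * extern declaration next to the macro, roughly like this (my_fatal_handler
+ * is a placeholder name, not something defined by this library; note that
+ * the macro below must still return the error code, because the callers in
+ * this file rely on it):
+ *
+ *     extern void my_fatal_handler( const char * msg );
+ *     #define Twofish_fatal( pmsgx, code ) { my_fatal_handler( pmsgx ); return( code ); }
+ *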
Maybe the best solution is to use + * a separate extern statement for your fatal function. + */ +/* #define Twofish_fatal(pmsgx) { fprintf(stderr, pmsgx); exit(1); } */ +#define Twofish_fatal(pmsgx, code) { return(code); } + + +/* + * The rest of the settings are not important for the functionality + * of this Twofish implementation. That is, their default settings + * work on all platforms. You can change them to improve the + * speed of the implementation on your platform. Erroneous settings + * will result in erroneous implementations, but the self-test should + * catch those. + */ + + +/* + * Macros to rotate a Twofish_UInt32 value left or right by the + * specified number of bits. This should be a 32-bit rotation, + * and not rotation of, say, 64-bit values. + * + * Every encryption or decryption operation uses 32 of these rotations, + * so it is a good idea to make these macros efficient. + * + * This fully portable definition has one piece of tricky stuff. + * The UInt32 might be larger than 32 bits, so we have to mask + * any higher bits off. The simplest way to do this is to 'and' the + * value first with 0xffffffff and then shift it right. An optimising + * compiler that has a 32-bit type can optimise this 'and' away. + * + * Unfortunately there is no portable way of writing the constant + * 0xffffffff. You don't know which suffix to use (U, or UL?) + * The UINT32_MASK definition uses a bit of trickery. Shift-left + * is only defined if the shift amount is strictly less than the size + * of the UInt32, so we can't use (1<<32). The answer it to take the value + * 2, cast it to a UInt32, shift it left 31 positions, and subtract one. + * Another example of how to make something very simple extremely difficult. + * I hate C. + * + * The rotation macros are straightforward. + * They are only applied to UInt32 values, which are _unsigned_ + * so the >> operator must do a logical shift that brings in zeroes. + * On most platforms you will only need to optimise the ROL32 macro; the + * ROR32 macro is not inefficient on an optimising compiler as all rotation + * amounts in this code are known at compile time. + * + * On many platforms there is a faster solution. + * For example, MS compilers have the __rotl and __rotr functions + * that generate x86 rotation instructions. + */ +#define UINT32_MASK ( (((Twofish_UInt32)2)<<31) - 1 ) + +#ifndef _MSC_VER +#define ROL32(x,n) ( (x)<<(n) | ((x) & UINT32_MASK) >> (32-(n)) ) +#define ROR32(x,n) ( (x)>>(n) | ((x) & UINT32_MASK) << (32-(n)) ) +#else +#define ROL32(x,n) (_lrotl((x), (n))) +#define ROR32(x,n) (_lrotr((x), (n))) +#endif + +/* + * Select data type for q-table entries. + * + * Larger entry types cost more memory (1.5 kB), and might be faster + * or slower depending on the CPU and compiler details. + * + * This choice only affects the static data size and the key setup speed. + * Functionality, expanded key size, or encryption speed are not affected. + * Define to 1 to get large q-table entries. + */ +#define LARGE_Q_TABLE 0 /* default = 0 */ + + +/* + * Method to select a single byte from a UInt32. + * WARNING: non-portable code if set; might not work on all platforms. + * + * Inside the inner loop of Twofish it is necessary to access the 4 + * individual bytes of a UInt32. This can be done using either shifts + * and masks, or memory accesses. + * + * Set to 0 to use shift and mask operations for the byte selection. + * This is more ALU intensive. It is also fully portable. + * + * Set to 1 to use memory accesses. 
The UInt32 is stored in memory and + * the individual bytes are read from memory one at a time. + * This solution is more memory-intensive, and not fully portable. + * It might be faster on your platform, or not. If you use this option, + * make sure you set the CPU_IS_BIG_ENDIAN flag appropriately. + * + * This macro does not affect the conversion of the inputs and outputs + * of the cipher. See the CONVERT_USING_CASTS macro for that. + */ +#define SELECT_BYTE_FROM_UINT32_IN_MEMORY 0 /* default = 0 */ + + +/* + * Method used to read the input and write the output. + * WARNING: non-portable code if set; might not work on all platforms. + * + * Twofish operates on 32-bit words. The input to the cipher is + * a byte array, as is the output. The portable method of doing the + * conversion is a bunch of rotate and mask operations, but on many + * platforms it can be done faster using a cast. + * This only works if your CPU allows UInt32 accesses to arbitrary Byte + * addresses. + * + * Set to 0 to use the shift and mask operations. This is fully + * portable. . + * + * Set to 1 to use a cast. The Byte * is cast to a UInt32 *, and a + * UInt32 is read. If necessary (as indicated by the CPU_IS_BIG_ENDIAN + * macro) the byte order in the UInt32 is swapped. The reverse is done + * to write the output of the encryption/decryption. Make sure you set + * the CPU_IS_BIG_ENDIAN flag appropriately. + * This option does not work unless a UInt32 is exactly 32 bits. + * + * This macro only changes the reading/writing of the plaintext/ciphertext. + * See the SELECT_BYTE_FROM_UINT32_IN_MEMORY to affect the way in which + * a UInt32 is split into 4 bytes for the S-box selection. + */ +#define CONVERT_USING_CASTS 0 /* default = 0 */ + + +/* + * Endianness switch. + * Only relevant if SELECT_BYTE_FROM_UINT32_IN_MEMORY or + * CONVERT_USING_CASTS is set. + * + * Set to 1 on a big-endian machine, and to 0 on a little-endian machine. + * Twofish uses the little-endian convention (least significant byte first) + * and big-endian machines (using most significant byte first) + * have to do a few conversions. + * + * CAUTION: This code has never been tested on a big-endian machine, + * because I don't have access to one. Feedback appreciated. + */ +#define CPU_IS_BIG_ENDIAN 0 + + +/* + * Macro to reverse the order of the bytes in a UInt32. + * Used to convert to little-endian on big-endian machines. + * This macro is always tested, but only used in the encryption and + * decryption if CONVERT_USING_CASTS, and CPU_IS_BIG_ENDIAN + * are both set. In other words: this macro is only speed-critical if + * both these flags have been set. + * + * This default definition of SWAP works, but on many platforms there is a + * more efficient implementation. + */ +#define BSWAP(x) ((ROL32((x),8)&0x00ff00ff) | (ROR32((x),8) & 0xff00ff00)) + + +/* + * END OF PLATFORM FIXES + * ===================== + * + * You should not have to touch the rest of this file. + */ + + +/* + * Convert the external type names to some that are easier to use inside + * this file. I didn't want to use the names Byte and UInt32 in the + * header file, because many programs already define them and using two + * conventions at once can be very difficult. + * Don't change these definitions! Change the originals + * in twofish.h instead. + */ +/* A Byte must be an unsigned integer, 8 bits long. */ +/* typedef Twofish_Byte Byte; */ +/* A UInt32 must be an unsigned integer at least 32 bits long. 
*/ +/* typedef Twofish_UInt32 UInt32; */ + + +/* + * Define a macro ENDIAN_CONVERT. + * + * We define a macro ENDIAN_CONVERT that performs a BSWAP on big-endian + * machines, and is the identity function on little-endian machines. + * The code then uses this macro without considering the endianness. + */ + +#if CPU_IS_BIG_ENDIAN +#define ENDIAN_CONVERT(x) BSWAP(x) +#else +#define ENDIAN_CONVERT(x) (x) +#endif + + +/* + * Compute byte offset within a UInt32 stored in memory. + * + * This is only used when SELECT_BYTE_FROM_UINT32_IN_MEMORY is set. + * + * The input is the byte number 0..3, 0 for least significant. + * Note the use of sizeof() to support UInt32 types that are larger + * than 4 bytes. + */ +#if CPU_IS_BIG_ENDIAN +#define BYTE_OFFSET( n ) (sizeof(Twofish_UInt32) - 1 - (n) ) +#else +#define BYTE_OFFSET( n ) (n) +#endif + + +/* + * Macro to get Byte no. b from UInt32 value X. + * We use two different definition, depending on the settings. + */ +#if SELECT_BYTE_FROM_UINT32_IN_MEMORY + /* Pick the byte from the memory in which X is stored. */ +#define SELECT_BYTE( X, b ) (((Twofish_Byte *)(&(X)))[BYTE_OFFSET(b)]) +#else + /* Portable solution: Pick the byte directly from the X value. */ +#define SELECT_BYTE( X, b ) (((X) >> (8*(b))) & 0xff) +#endif + + +/* Some shorthands because we use byte selection in large formulae. */ +#define b0(X) SELECT_BYTE((X),0) +#define b1(X) SELECT_BYTE((X),1) +#define b2(X) SELECT_BYTE((X),2) +#define b3(X) SELECT_BYTE((X),3) + + +/* + * We need macros to load and store UInt32 from/to byte arrays + * using the least-significant-byte-first convention. + * + * GET32( p ) gets a UInt32 in lsb-first form from four bytes pointed to + * by p. + * PUT32( v, p ) writes the UInt32 value v at address p in lsb-first form. + */ +#if CONVERT_USING_CASTS + + /* Get UInt32 from four bytes pointed to by p. */ +#define GET32( p ) ENDIAN_CONVERT( *((Twofish_UInt32 *)(p)) ) + /* Put UInt32 into four bytes pointed to by p */ +#define PUT32( v, p ) *((Twofish_UInt32 *)(p)) = ENDIAN_CONVERT(v) + +#else + + /* Get UInt32 from four bytes pointed to by p. */ +#define GET32( p ) \ + ( \ + (Twofish_UInt32)((p)[0]) \ + | (Twofish_UInt32)((p)[1])<< 8 \ + | (Twofish_UInt32)((p)[2])<<16 \ + | (Twofish_UInt32)((p)[3])<<24 \ + ) + /* Put UInt32 into four bytes pointed to by p */ +#define PUT32( v, p ) \ + (p)[0] = (Twofish_Byte)(((v) ) & 0xff); \ + (p)[1] = (Twofish_Byte)(((v) >> 8) & 0xff); \ + (p)[2] = (Twofish_Byte)(((v) >> 16) & 0xff); \ + (p)[3] = (Twofish_Byte)(((v) >> 24) & 0xff) + +#endif + +#ifdef ANDROID +/** + * Dummy function to disable some compiler optimizations. + * + * See comment in Twofish_cfb128_encrypt(). + */ +void Two_debugDummy(Twofish_Byte* in, Twofish_Byte* out, Twofish_Byte* ivec) +{ +} +#endif +/* + * Test the platform-specific macros. + * This function tests the macros defined so far to make sure the + * definitions are appropriate for this platform. + * If you make any mistake in the platform configuration, this should detect + * that and inform you what went wrong. + * Somewhere, someday, this is going to save somebody a lot of time, + * because misbehaving macros are hard to debug. + */ +static int test_platform() + { + /* Buffer with test values. */ + Twofish_Byte buf[] = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0}; + Twofish_UInt32 C; + Twofish_UInt32 x,y; + int i; + + /* + * Some sanity checks on the types that can't be done in compile time. + * A smart compiler will just optimise these tests away. 
+
+ * The pre-processor doesn't understand different types, so we cannot
+ * do these checks in compile-time.
+ *
+ * I hate C.
+ *
+ * The first check in each case is to make sure the size is correct.
+ * The second check is to ensure that it is an unsigned type.
+ */
+ if( ((Twofish_UInt32)((Twofish_UInt32)1 << 31) == 0) || ((Twofish_UInt32)-1 < 0 ))
+ {
+ Twofish_fatal( "Twofish code: Twofish_UInt32 type not suitable", ERR_UINT32 );
+ }
+ if( (sizeof( Twofish_Byte ) != 1) || (((Twofish_Byte)-1) < 0) )
+ {
+ Twofish_fatal( "Twofish code: Twofish_Byte type not suitable", ERR_BYTE );
+ }
+
+ /*
+ * Sanity-check the endianness conversions.
+ * This is just an aid to find problems. If you do the endianness
+ * conversion macros wrong you will fail the full cipher test,
+ * but that does not help you find the error.
+ * Always make it easy to find the bugs!
+ *
+ * Detail: There is no fully portable way of writing UInt32 constants,
+ * as you don't know whether to use the U or UL suffix. Using only U you
+ * might only be allowed 16-bit constants. Using UL you might get 64-bit
+ * constants which cannot be stored in a UInt32 without warnings, and
+ * which generally behave subtly different from a true UInt32.
+ * As long as we're just comparing with the constant,
+ * we can always use the UL suffix and at worst lose some efficiency.
+ * I use a separate '32-bit constant' macro in most of my other code.
+ *
+ * I hate C.
+ *
+ * Start with testing GET32. We test it on all positions modulo 4
+ * to make sure we can handle any position of inputs. (Some CPUs
+ * do not allow non-aligned accesses which we would do if you used
+ * the CONVERT_USING_CASTS option.)
+ */
+ if( (GET32( buf ) != 0x78563412UL) || (GET32(buf+1) != 0x9a785634UL)
+ || (GET32( buf+2 ) != 0xbc9a7856UL) || (GET32(buf+3) != 0xdebc9a78UL) )
+ {
+ Twofish_fatal( "Twofish code: GET32 not implemented properly", ERR_GET32 );
+ }
+
+ /*
+ * We can now use GET32 to test PUT32.
+ * We don't test the shifted versions. If GET32 can do that then
+ * so should PUT32.
+ */
+ C = GET32( buf );
+ PUT32( 3*C, buf );
+ if( GET32( buf ) != 0x69029c36UL )
+ {
+ Twofish_fatal( "Twofish code: PUT32 not implemented properly", ERR_PUT32 );
+ }
+
+
+ /* Test ROL and ROR */
+ for( i=1; i<32; i++ )
+ {
+ /* Just a simple test. */
+ x = ROR32( C, i );
+ y = ROL32( C, i );
+ x ^= (C>>i) ^ (C<<(32-i));
+ /*y ^= (C<<i) ^ (C>>(32-i)); */
+ y ^= (C<<i) ^ (C>>(32-i));
+ x |= y;
+ /*
+ * Now all we check is that x is zero in the least significant
+ * 32 bits. Using the UL suffix is safe here, as it doesn't matter
+ * if we get a larger type.
+ */
+ if( (x & 0xffffffffUL) != 0 )
+ {
+ Twofish_fatal( "Twofish ROL or ROR not properly defined.", ERR_ROLR );
+ }
+ }
+
+ /* Test the BSWAP macro */
+ if( BSWAP(C) != 0x12345678UL )
+ {
+ /*
+ * The BSWAP macro should always work, even if you are not using it.
+ * A smart optimising compiler will just remove this entire test.
+ */
+ Twofish_fatal( "BSWAP not properly defined.", ERR_BSWAP );
+ }
+
+ /* And we can test the b macros which use SELECT_BYTE. */
+ if( (b0(C)!=0x12) || (b1(C) != 0x34) || (b2(C) != 0x56) || (b3(C) != 0x78) )
+ {
+ /*
+ * There are many reasons why this could fail.
+ * Most likely is that CPU_IS_BIG_ENDIAN has the wrong value.
+ */
+ Twofish_fatal( "Twofish code: SELECT_BYTE not implemented properly", ERR_SELECTB );
+ }
+ return SUCCESS;
+ }
+
+
+/*
+ * Finally, we can start on the Twofish-related code.
+ * You really need the Twofish specifications to understand this code.
The + * best source is the Twofish book: + * "The Twofish Encryption Algorithm", by Bruce Schneier, John Kelsey, + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson. + * you can also use the AES submission document of Twofish, which is + * available from my list of publications on my personal web site at + * http://niels.ferguson.net/. + * + * The first thing we do is write the testing routines. This is what the + * implementation has to satisfy in the end. We only test the external + * behaviour of the implementation of course. + */ + + +/* + * Perform a single self test on a (plaintext,ciphertext,key) triple. + * Arguments: + * key array of key bytes + * key_len length of key in bytes + * p plaintext + * c ciphertext + */ +static int test_vector( Twofish_Byte key[], int key_len, Twofish_Byte p[16], Twofish_Byte c[16] ) + { + Twofish_Byte tmp[16]; /* scratch pad. */ + Twofish_key xkey; /* The expanded key */ + int i; + + + /* Prepare the key */ + if ((i = Twofish_prepare_key( key, key_len, &xkey)) < 0) + return i; + + /* + * We run the test twice to ensure that the xkey structure + * is not damaged by the first encryption. + * Those are hideous bugs to find if you get them in an application. + */ + for( i=0; i<2; i++ ) + { + /* Encrypt and test */ + Twofish_encrypt( &xkey, p, tmp ); + if( memcmp( c, tmp, 16 ) != 0 ) + { + Twofish_fatal( "Twofish encryption failure", ERR_TEST_ENC ); + } + + /* Decrypt and test */ + Twofish_decrypt( &xkey, c, tmp ); + if( memcmp( p, tmp, 16 ) != 0 ) + { + Twofish_fatal( "Twofish decryption failure", ERR_TEST_DEC ); + } + } + + /* The test keys are not secret, so we don't need to wipe xkey. */ + return SUCCESS; + } + + +/* + * Check implementation using three (key,plaintext,ciphertext) + * test vectors, one for each major key length. + * + * This is an absolutely minimal self-test. + * This routine does not test odd-sized keys. + */ +static int test_vectors() + { + /* + * We run three tests, one for each major key length. + * These test vectors come from the Twofish specification. + * One encryption and one decryption using randomish data and key + * will detect almost any error, especially since we generate the + * tables ourselves, so we don't have the problem of a single + * damaged table entry in the source. + */ + + /* 128-bit test is the I=3 case of section B.2 of the Twofish book. */ + static Twofish_Byte k128[] = { + 0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, + 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A, + }; + static Twofish_Byte p128[] = { + 0xD4, 0x91, 0xDB, 0x16, 0xE7, 0xB1, 0xC3, 0x9E, + 0x86, 0xCB, 0x08, 0x6B, 0x78, 0x9F, 0x54, 0x19 + }; + static Twofish_Byte c128[] = { + 0x01, 0x9F, 0x98, 0x09, 0xDE, 0x17, 0x11, 0x85, + 0x8F, 0xAA, 0xC3, 0xA3, 0xBA, 0x20, 0xFB, 0xC3 + }; + + /* 192-bit test is the I=4 case of section B.2 of the Twofish book. */ + static Twofish_Byte k192[] = { + 0x88, 0xB2, 0xB2, 0x70, 0x6B, 0x10, 0x5E, 0x36, + 0xB4, 0x46, 0xBB, 0x6D, 0x73, 0x1A, 0x1E, 0x88, + 0xEF, 0xA7, 0x1F, 0x78, 0x89, 0x65, 0xBD, 0x44 + }; + static Twofish_Byte p192[] = { + 0x39, 0xDA, 0x69, 0xD6, 0xBA, 0x49, 0x97, 0xD5, + 0x85, 0xB6, 0xDC, 0x07, 0x3C, 0xA3, 0x41, 0xB2 + }; + static Twofish_Byte c192[] = { + 0x18, 0x2B, 0x02, 0xD8, 0x14, 0x97, 0xEA, 0x45, + 0xF9, 0xDA, 0xAC, 0xDC, 0x29, 0x19, 0x3A, 0x65 + }; + + /* 256-bit test is the I=4 case of section B.2 of the Twofish book. 
*/ + static Twofish_Byte k256[] = { + 0xD4, 0x3B, 0xB7, 0x55, 0x6E, 0xA3, 0x2E, 0x46, + 0xF2, 0xA2, 0x82, 0xB7, 0xD4, 0x5B, 0x4E, 0x0D, + 0x57, 0xFF, 0x73, 0x9D, 0x4D, 0xC9, 0x2C, 0x1B, + 0xD7, 0xFC, 0x01, 0x70, 0x0C, 0xC8, 0x21, 0x6F + }; + static Twofish_Byte p256[] = { + 0x90, 0xAF, 0xE9, 0x1B, 0xB2, 0x88, 0x54, 0x4F, + 0x2C, 0x32, 0xDC, 0x23, 0x9B, 0x26, 0x35, 0xE6 + }; + static Twofish_Byte c256[] = { + 0x6C, 0xB4, 0x56, 0x1C, 0x40, 0xBF, 0x0A, 0x97, + 0x05, 0x93, 0x1C, 0xB6, 0xD4, 0x08, 0xE7, 0xFA + }; + + int ret; + + /* Run the actual tests. */ + if ((ret = test_vector( k128, 16, p128, c128 )) < 0) + return ret; + if ((ret = test_vector( k192, 24, p192, c192 )) < 0) + return ret; + if ((ret = test_vector( k256, 32, p256, c256 )) < 0) + return ret; + return SUCCESS; + } + + +/* + * Perform extensive test for a single key size. + * + * Test a single key size against the test vectors from section + * B.2 in the Twofish book. This is a sequence of 49 encryptions + * and decryptions. Each plaintext is equal to the ciphertext of + * the previous encryption. The key is made up from the ciphertext + * two and three encryptions ago. Both plaintext and key start + * at the zero value. + * We should have designed a cleaner recurrence relation for + * these tests, but it is too late for that now. At least we learned + * how to do it better next time. + * For details see appendix B of the book. + * + * Arguments: + * key_len Number of bytes of key + * final_value Final plaintext value after 49 iterations + */ +static int test_sequence( int key_len, Twofish_Byte final_value[] ) + { + Twofish_Byte buf[ (50+3)*16 ]; /* Buffer to hold our computation values. */ + Twofish_Byte tmp[16]; /* Temp for testing the decryption. */ + Twofish_key xkey; /* The expanded key */ + int i, ret; + Twofish_Byte * p; + + /* Wipe the buffer */ + memset( buf, 0, sizeof( buf ) ); + + /* + * Because the recurrence relation is done in an inconvenient manner + * we end up looping backwards over the buffer. + */ + + /* Pointer in buffer points to current plaintext. */ + p = &buf[50*16]; + for( i=1; i<50; i++ ) + { + /* + * Prepare a key. + * This automatically checks that key_len is valid. + */ + if ((ret = Twofish_prepare_key( p+16, key_len, &xkey)) < 0) + return ret; + + /* Compute the next 16 bytes in the buffer */ + Twofish_encrypt( &xkey, p, p-16 ); + + /* Check that the decryption is correct. */ + Twofish_decrypt( &xkey, p-16, tmp ); + if( memcmp( tmp, p, 16 ) != 0 ) + { + Twofish_fatal( "Twofish decryption failure in sequence", ERR_SEQ_DEC ); + } + /* Move on to next 16 bytes in the buffer. */ + p -= 16; + } + + /* And check the final value. */ + if( memcmp( p, final_value, 16 ) != 0 ) + { + Twofish_fatal( "Twofish encryption failure in sequence", ERR_SEQ_ENC ); + } + + /* None of the data was secret, so there is no need to wipe anything. */ + return SUCCESS; + } + + +/* + * Run all three sequence tests from the Twofish test vectors. + * + * This checks the most extensive test vectors currently available + * for Twofish. The data is from the Twofish book, appendix B.2. 
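+ *
+ * Written out as a recurrence (informal notation, not taken from the
+ * original comments), test_sequence() above computes
+ *
+ *     C[0] = 0,  C[-1] = C[-2] = 0            (the zero-filled buffer)
+ *     K[i] = first key_len bytes of C[i-2] || C[i-3]
+ *     C[i] = Twofish_encrypt( K[i], C[i-1] )  for i = 1, ..., 49
+ *
+ * and then compares C[49] with the expected final value.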
+ */ +static int test_sequences() + { + static Twofish_Byte r128[] = { + 0x5D, 0x9D, 0x4E, 0xEF, 0xFA, 0x91, 0x51, 0x57, + 0x55, 0x24, 0xF1, 0x15, 0x81, 0x5A, 0x12, 0xE0 + }; + static Twofish_Byte r192[] = { + 0xE7, 0x54, 0x49, 0x21, 0x2B, 0xEE, 0xF9, 0xF4, + 0xA3, 0x90, 0xBD, 0x86, 0x0A, 0x64, 0x09, 0x41 + }; + static Twofish_Byte r256[] = { + 0x37, 0xFE, 0x26, 0xFF, 0x1C, 0xF6, 0x61, 0x75, + 0xF5, 0xDD, 0xF4, 0xC3, 0x3B, 0x97, 0xA2, 0x05 + }; + + /* Run the three sequence test vectors */ + int ret; + if ((ret = test_sequence( 16, r128)) < 0) + return ret; + if ((ret = test_sequence( 24, r192)) < 0) + return ret; + if ((ret = test_sequence( 32, r256)) < 0) + return ret; + return SUCCESS; + } + + +/* + * Test the odd-sized keys. + * + * Every odd-sized key is equivalent to a one of 128, 192, or 256 bits. + * The equivalent key is found by padding at the end with zero bytes + * until a regular key size is reached. + * + * We just test that the key expansion routine behaves properly. + * If the expanded keys are identical, then the encryptions and decryptions + * will behave the same. + */ +static int test_odd_sized_keys() + { + Twofish_Byte buf[32]; + Twofish_key xkey; + Twofish_key xkey_two; + int i, ret; + + /* + * We first create an all-zero key to use as PRNG key. + * Normally we would not have to fill the buffer with zeroes, as we could + * just pass a zero key length to the Twofish_prepare_key function. + * However, this relies on using odd-sized keys, and those are just the + * ones we are testing here. We can't use an untested function to test + * itself. + */ + memset( buf, 0, sizeof( buf ) ); + if ((ret = Twofish_prepare_key( buf, 16, &xkey)) < 0) + return ret; + + /* Fill buffer with pseudo-random data derived from two encryptions */ + Twofish_encrypt( &xkey, buf, buf ); + Twofish_encrypt( &xkey, buf, buf+16 ); + + /* Create all possible shorter keys that are prefixes of the buffer. */ + for( i=31; i>=0; i-- ) + { + /* Set a byte to zero. This is the new padding byte */ + buf[i] = 0; + + /* Expand the key with only i bytes of length */ + if ((ret = Twofish_prepare_key( buf, i, &xkey)) < 0) + return ret; + + /* Expand the corresponding padded key of regular length */ + if ((ret = Twofish_prepare_key( buf, i<=16 ? 16 : (i<= 24 ? 24 : 32), &xkey_two )) < 0) + return ret; + + /* Compare the two */ + if( memcmp( &xkey, &xkey_two, sizeof( xkey ) ) != 0 ) + { + Twofish_fatal( "Odd sized keys do not expand properly", ERR_ODD_KEY ); + } + } + + /* None of the key values are secret, so we don't need to wipe them. */ + return SUCCESS; + } + + +/* + * Test the Twofish implementation. + * + * This routine runs all the self tests, in order of importance. + * It is called by the Twofish_initialise routine. + * + * In almost all applications the cost of running the self tests during + * initialisation is insignificant, especially + * compared to the time it takes to load the application from disk. + * If you are very pressed for initialisation performance, + * you could remove some of the tests. Make sure you did run them + * once in the software and hardware configuration you are using. + */ +static int self_test() + { + int ret; + /* The three test vectors form an absolute minimal test set. */ + if ((ret = test_vectors()) < 0) + return ret; + + /* + * If at all possible you should run these tests too. They take + * more time, but provide a more thorough coverage. + */ + if ((ret = test_sequences()) < 0) + return ret; + + /* Test the odd-sized keys. 
*/ + if ((ret = test_odd_sized_keys()) < 0) + return ret; + return SUCCESS; + } + + +/* + * And now, the actual Twofish implementation. + * + * This implementation generates all the tables during initialisation. + * I don't like large tables in the code, especially since they are easily + * damaged in the source without anyone noticing it. You need code to + * generate them anyway, and this way all the code is close together. + * Generating them in the application leads to a smaller executable + * (the code is smaller than the tables it generates) and a + * larger static memory footprint. + * + * Twofish can be implemented in many ways. I have chosen to + * use large tables with a relatively long key setup time. + * If you encrypt more than a few blocks of data it pays to pre-compute + * as much as possible. This implementation is relatively inefficient for + * applications that need to re-key every block or so. + */ + +/* + * We start with the t-tables, directly from the Twofish definition. + * These are nibble-tables, but merging them and putting them two nibbles + * in one byte is more work than it is worth. + */ +static Twofish_Byte t_table[2][4][16] = { + { + {0x8,0x1,0x7,0xD,0x6,0xF,0x3,0x2,0x0,0xB,0x5,0x9,0xE,0xC,0xA,0x4}, + {0xE,0xC,0xB,0x8,0x1,0x2,0x3,0x5,0xF,0x4,0xA,0x6,0x7,0x0,0x9,0xD}, + {0xB,0xA,0x5,0xE,0x6,0xD,0x9,0x0,0xC,0x8,0xF,0x3,0x2,0x4,0x7,0x1}, + {0xD,0x7,0xF,0x4,0x1,0x2,0x6,0xE,0x9,0xB,0x3,0x0,0x8,0x5,0xC,0xA} + }, + { + {0x2,0x8,0xB,0xD,0xF,0x7,0x6,0xE,0x3,0x1,0x9,0x4,0x0,0xA,0xC,0x5}, + {0x1,0xE,0x2,0xB,0x4,0xC,0x3,0x7,0x6,0xD,0xA,0x5,0xF,0x9,0x0,0x8}, + {0x4,0xC,0x7,0x5,0x1,0x6,0x9,0xA,0x0,0xE,0xD,0x8,0x2,0xB,0x3,0xF}, + {0xB,0x9,0x5,0x1,0xC,0x3,0xD,0xE,0x6,0x4,0x7,0xF,0x2,0x0,0x8,0xA} + } +}; + + +/* A 1-bit rotation of 4-bit values. Input must be in range 0..15 */ +#define ROR4BY1( x ) (((x)>>1) | (((x)<<3) & 0x8) ) + +/* + * The q-boxes are only used during the key schedule computations. + * These are 8->8 bit lookup tables. Some CPUs prefer to have 8->32 bit + * lookup tables as it is faster to load a 32-bit value than to load an + * 8-bit value and zero the rest of the register. + * The LARGE_Q_TABLE switch allows you to choose 32-bit entries in + * the q-tables. Here we just define the Qtype which is used to store + * the entries of the q-tables. + */ +#if LARGE_Q_TABLE +typedef Twofish_UInt32 Qtype; +#else +typedef Twofish_Byte Qtype; +#endif + +/* + * The actual q-box tables. + * There are two q-boxes, each having 256 entries. + */ +static Qtype q_table[2][256]; + + +/* + * Now the function that converts a single t-table into a q-table. + * + * Arguments: + * t[4][16] : four 4->4bit lookup tables that define the q-box + * q[256] : output parameter: the resulting q-box as a lookup table. + */ +static void make_q_table( Twofish_Byte t[4][16], Qtype q[256] ) + { + int ae,be,ao,bo; /* Some temporaries. */ + int i; + /* Loop over all input values and compute the q-box result. */ + for( i=0; i<256; i++ ) { + /* + * This is straight from the Twofish specifications. + * + * The ae variable is used for the a_i values from the specs + * with even i, and ao for the odd i's. Similarly for the b's. + */ + ae = i>>4; be = i&0xf; + ao = ae ^ be; bo = ae ^ ROR4BY1(be) ^ ((ae<<3)&8); + ae = t[0][ao]; be = t[1][bo]; + ao = ae ^ be; bo = ae ^ ROR4BY1(be) ^ ((ae<<3)&8); + ae = t[2][ao]; be = t[3][bo]; + + /* Store the result in the q-box table, the cast avoids a warning. */ + q[i] = (Qtype) ((be<<4) | ae); + } + } + + +/* + * Initialise both q-box tables. 
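+ *
+ * For reference, the construction performed by make_q_table() above is the
+ * q permutation from the Twofish specification: with a0 = i>>4, b0 = i&15,
+ *
+ *     a1 = a0 ^ b0         b1 = a0 ^ ROR4BY1(b0) ^ (8*a0 mod 16)
+ *     a2 = t[0][a1]        b2 = t[1][b1]
+ *     a3 = a2 ^ b2         b3 = a2 ^ ROR4BY1(b2) ^ (8*a2 mod 16)
+ *     a4 = t[2][a3]        b4 = t[3][b3]
+ *     q[i] = 16*b4 + a4
+ *
+ * which is exactly the ae/be/ao/bo computation in the loop above.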
+ */ +static void initialise_q_boxes() { + /* Initialise each of the q-boxes using the t-tables */ + make_q_table( t_table[0], q_table[0] ); + make_q_table( t_table[1], q_table[1] ); + } + + +/* + * Next up is the MDS matrix multiplication. + * The MDS matrix multiplication operates in the field + * GF(2)[x]/p(x) with p(x)=x^8+x^6+x^5+x^3+1. + * If you don't understand this, read a book on finite fields. You cannot + * follow the finite-field computations without some background. + * + * In this field, multiplication by x is easy: shift left one bit + * and if bit 8 is set then xor the result with 0x169. + * + * The MDS coefficients use a multiplication by 1/x, + * or rather a division by x. This is easy too: first make the + * value 'even' (i.e. bit 0 is zero) by xorring with 0x169 if necessary, + * and then shift right one position. + * Even easier: shift right and xor with 0xb4 if the lsbit was set. + * + * The MDS coefficients are 1, EF, and 5B, and we use the fact that + * EF = 1 + 1/x + 1/x^2 + * 5B = 1 + 1/x^2 + * in this field. This makes multiplication by EF and 5B relatively easy. + * + * This property is no accident, the MDS matrix was designed to allow + * this implementation technique to be used. + * + * We have four MDS tables, each mapping 8 bits to 32 bits. + * Each table performs one column of the matrix multiplication. + * As the MDS is always preceded by q-boxes, each of these tables + * also implements the q-box just previous to that column. + */ + +/* The actual MDS tables. */ +static Twofish_UInt32 MDS_table[4][256]; + +/* A small table to get easy conditional access to the 0xb4 constant. */ +static Twofish_UInt32 mds_poly_divx_const[] = {0,0xb4}; + +/* Function to initialise the MDS tables. */ +static void initialise_mds_tables() + { + int i; + Twofish_UInt32 q,qef,q5b; /* Temporary variables. */ + + /* Loop over all 8-bit input values */ + for( i=0; i<256; i++ ) + { + /* + * To save some work during the key expansion we include the last + * of the q-box layers from the h() function in these MDS tables. + */ + + /* We first do the inputs that are mapped through the q0 table. */ + q = q_table[0][i]; + /* + * Here we divide by x, note the table to get 0xb4 only if the + * lsbit is set. + * This sets qef = (1/x)*q in the finite field + */ + qef = (q >> 1) ^ mds_poly_divx_const[ q & 1 ]; + /* + * Divide by x again, and add q to get (1+1/x^2)*q. + * Note that (1+1/x^2) = 5B in the field, and addition in the field + * is exclusive or on the bits. + */ + q5b = (qef >> 1) ^ mds_poly_divx_const[ qef & 1 ] ^ q; + /* + * Add q5b to qef to set qef = (1+1/x+1/x^2)*q. + * Again, (1+1/x+1/x^2) = EF in the field. + */ + qef ^= q5b; + + /* + * Now that we have q5b = 5B * q and qef = EF * q + * we can fill two of the entries in the MDS matrix table. + * See the Twofish specifications for the order of the constants. + */ + MDS_table[1][i] = (q <<24) | (q5b<<16) | (qef<<8) | qef; + MDS_table[3][i] = (q5b<<24) | (qef<<16) | (q <<8) | q5b; + + /* Now we do it all again for the two columns that have a q1 box. */ + q = q_table[1][i]; + qef = (q >> 1) ^ mds_poly_divx_const[ q & 1 ]; + q5b = (qef >> 1) ^ mds_poly_divx_const[ qef & 1 ] ^ q; + qef ^= q5b; + + /* The other two columns use the coefficient in a different order. */ + MDS_table[0][i] = (qef<<24) | (qef<<16) | (q5b<<8) | q ; + MDS_table[2][i] = (qef<<24) | (q <<16) | (qef<<8) | q5b; + } + } + + +/* + * The h() function is the heart of the Twofish cipher. 
+ * It is a complicated sequence of q-box lookups, key material xors, + * and finally the MDS matrix. + * We use lots of macros to make this reasonably fast. + */ + +/* First a shorthand for the two q-tables */ +#define q0 q_table[0] +#define q1 q_table[1] + +/* + * Each macro computes one column of the h for either 2, 3, or 4 stages. + * As there are 4 columns, we have 12 macros in all. + * + * The key bytes are stored in the Byte array L at offset + * 0,1,2,3, 8,9,10,11, [16,17,18,19, [24,25,26,27]] as this is the + * order we get the bytes from the user. If you look at the Twofish + * specs, you'll see that h() is applied to the even key words or the + * odd key words. The bytes of the even words appear in this spacing, + * and those of the odd key words too. + * + * These macros are the only place where the q-boxes and the MDS table + * are used. + */ +#define H02( y, L ) MDS_table[0][q0[q0[y]^L[ 8]]^L[0]] +#define H12( y, L ) MDS_table[1][q0[q1[y]^L[ 9]]^L[1]] +#define H22( y, L ) MDS_table[2][q1[q0[y]^L[10]]^L[2]] +#define H32( y, L ) MDS_table[3][q1[q1[y]^L[11]]^L[3]] +#define H03( y, L ) H02( q1[y]^L[16], L ) +#define H13( y, L ) H12( q1[y]^L[17], L ) +#define H23( y, L ) H22( q0[y]^L[18], L ) +#define H33( y, L ) H32( q0[y]^L[19], L ) +#define H04( y, L ) H03( q1[y]^L[24], L ) +#define H14( y, L ) H13( q0[y]^L[25], L ) +#define H24( y, L ) H23( q0[y]^L[26], L ) +#define H34( y, L ) H33( q1[y]^L[27], L ) + +/* + * Now we can define the h() function given an array of key bytes. + * This function is only used in the key schedule, and not to pre-compute + * the keyed S-boxes. + * + * In the key schedule, the input is always of the form k*(1+2^8+2^16+2^24) + * so we only provide k as an argument. + * + * Arguments: + * k input to the h() function. + * L pointer to array of key bytes at + * offsets 0,1,2,3, ... 8,9,10,11, [16,17,18,19, [24,25,26,27]] + * kCycles # key cycles, 2, 3, or 4. + */ +static Twofish_UInt32 h( int k, Twofish_Byte L[], int kCycles ) + { + switch( kCycles ) { + /* We code all 3 cases separately for speed reasons. */ + case 2: + return H02(k,L) ^ H12(k,L) ^ H22(k,L) ^ H32(k,L); + case 3: + return H03(k,L) ^ H13(k,L) ^ H23(k,L) ^ H33(k,L); + case 4: + return H04(k,L) ^ H14(k,L) ^ H24(k,L) ^ H34(k,L); + default: + /* This is always a coding error, which is fatal. */ + Twofish_fatal( "Twofish h(): Illegal argument", ERR_ILL_ARG ); + return ERR_ILL_ARG; + } + } + + +/* + * Pre-compute the keyed S-boxes. + * Fill the pre-computed S-box array in the expanded key structure. + * Each pre-computed S-box maps 8 bits to 32 bits. + * + * The S argument contains half the number of bytes of the full key, but is + * derived from the full key. (See Twofish specifications for details.) + * S has the weird byte input order used by the Hxx macros. + * + * This function takes most of the time of a key expansion. + * + * Arguments: + * S pointer to array of 8*kCycles Bytes containing the S vector. + * kCycles number of key words, must be in the set {2,3,4} + * xkey pointer to Twofish_key structure that will contain the S-boxes. + */ +static int fill_keyed_sboxes( Twofish_Byte S[], int kCycles, Twofish_key * xkey ) + { + int i; + switch( kCycles ) { + /* We code all 3 cases separately for speed reasons. 
*/ + case 2: + for( i=0; i<256; i++ ) + { + xkey->s[0][i]= H02( i, S ); + xkey->s[1][i]= H12( i, S ); + xkey->s[2][i]= H22( i, S ); + xkey->s[3][i]= H32( i, S ); + } + break; + case 3: + for( i=0; i<256; i++ ) + { + xkey->s[0][i]= H03( i, S ); + xkey->s[1][i]= H13( i, S ); + xkey->s[2][i]= H23( i, S ); + xkey->s[3][i]= H33( i, S ); + } + break; + case 4: + for( i=0; i<256; i++ ) + { + xkey->s[0][i]= H04( i, S ); + xkey->s[1][i]= H14( i, S ); + xkey->s[2][i]= H24( i, S ); + xkey->s[3][i]= H34( i, S ); + } + break; + default: + /* This is always a coding error, which is fatal. */ + Twofish_fatal( "Twofish fill_keyed_sboxes(): Illegal argument", ERR_ILL_ARG ); + } + return SUCCESS; + } + + +/* A flag to keep track of whether we have been initialised or not. */ +static int Twofish_initialised = 0; + +/* + * Initialise the Twofish implementation. + * This function must be called before any other function in the + * Twofish implementation is called. + * This routine also does some sanity checks, to make sure that + * all the macros behave, and it tests the whole cipher. + */ +int Twofish_initialise() + { + int ret; + /* First test the various platform-specific definitions. */ + if ((ret = test_platform()) < 0) + return ret; + + /* We can now generate our tables, in the right order of course. */ + initialise_q_boxes(); + initialise_mds_tables(); + + /* We're finished with the initialisation itself. */ + Twofish_initialised = 1; + + /* + * And run some tests on the whole cipher. + * Yes, you need to do this every time you start your program. + * It is called assurance; you have to be certain that your program + * still works properly. + */ + return self_test(); + } + + +/* + * The Twofish key schedule uses an Reed-Solomon code matrix multiply. + * Just like the MDS matrix, the RS-matrix is designed to be easy + * to implement. Details are below in the code. + * + * These constants make it easy to compute in the finite field used + * for the RS code. + * + * We use Bytes for the RS computation, but these are automatically + * widened to unsigned integers in the expressions. Having unsigned + * ints in these tables therefore provides the fastest access. + */ +static unsigned int rs_poly_const[] = {0, 0x14d}; +static unsigned int rs_poly_div_const[] = {0, 0xa6 }; + +/* + * memset_volatile is a volatile pointer to the memset function. + * You can call (*memset_volatile)(buf, val, len) or even + * memset_volatile(buf, val, len) just as you would call + * memset(buf, val, len), but the use of a volatile pointer + * guarantees that the compiler will not optimise the call away. + */ +static void * (*volatile memset_volatile)(void *, int, size_t) = memset; + +/* + * Prepare a key for use in encryption and decryption. + * Like most block ciphers, Twofish allows the key schedule + * to be pre-computed given only the key. + * Twofish has a fairly 'heavy' key schedule that takes a lot of time + * to compute. The main work is pre-computing the S-boxes used in the + * encryption and decryption. We feel that this makes the cipher much + * harder to attack. The attacker doesn't even know what the S-boxes + * contain without including the entire key schedule in the analysis. + * + * Unlike most Twofish implementations, this one allows any key size from + * 0 to 32 bytes. Odd key sizes are defined for Twofish (see the + * specifications); the key is simply padded with zeroes to the next real + * key size of 16, 24, or 32 bytes. + * Each odd-sized key is thus equivalent to a single normal-sized key. 
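+ * For example, a 20-byte key k[0..19] produces exactly the same expanded
+ * key, and therefore the same ciphertexts, as the regular 24-byte key
+ * k[0],...,k[19],0,0,0,0.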
+ * + * Arguments: + * key array of key bytes + * key_len number of bytes in the key, must be in the range 0,...,32. + * xkey Pointer to an Twofish_key structure that will be filled + * with the internal form of the cipher key. + */ +int Twofish_prepare_key( Twofish_Byte key[], int key_len, Twofish_key * xkey ) + { + /* We use a single array to store all key material in, + * to simplify the wiping of the key material at the end. + * The first 32 bytes contain the actual (padded) cipher key. + * The next 32 bytes contain the S-vector in its weird format, + * and we have 4 bytes of overrun necessary for the RS-reduction. + */ + Twofish_Byte K[32+32+4]; + + int kCycles; /* # key cycles, 2,3, or 4. */ + + int i; + Twofish_UInt32 A, B; /* Used to compute the round keys. */ + + Twofish_Byte * kptr; /* Three pointers for the RS computation. */ + Twofish_Byte * sptr; + Twofish_Byte * t; + + Twofish_Byte b,bx,bxx; /* Some more temporaries for the RS computation. */ + + /* Check that the Twofish implementation was initialised. */ + if( Twofish_initialised == 0 ) + { + /* + * You didn't call Twofish_initialise before calling this routine. + * This is a programming error, and therefore we call the fatal + * routine. + * + * I could of course call the initialisation routine here, + * but there are a few reasons why I don't. First of all, the + * self-tests have to be done at startup. It is no good to inform + * the user that the cipher implementation fails when he wants to + * write his data to disk in encrypted form. You have to warn him + * before he spends time typing his data. Second, the initialisation + * and self test are much slower than a single key expansion. + * Calling the initialisation here makes the performance of the + * cipher unpredictable. This can lead to really weird problems + * if you use the cipher for a real-time task. Suddenly it fails + * once in a while the first time you try to use it. Things like + * that are almost impossible to debug. + */ + /* Twofish_fatal( "Twofish implementation was not initialised.", ERR_INIT ); */ + + /* + * There is always a danger that the Twofish_fatal routine returns, + * in spite of the specifications that it should not. + * (A good programming rule: don't trust the rest of the code.) + * This would be disasterous. If the q-tables and MDS-tables have + * not been initialised, they are probably still filled with zeroes. + * Suppose the MDS-tables are all zero. The key expansion would then + * generate all-zero round keys, and all-zero s-boxes. The danger + * is that nobody would notice as the encry + * mangles the input, and the decryption still 'decrypts' it, + * but now in a completely key-independent manner. + * To stop such security disasters, we use blunt force. + * If your program hangs here: fix the fatal routine! + */ + for(;;); /* Infinite loop, which beats being insecure. */ + } + + /* Check for valid key length. */ + if( key_len < 0 || key_len > 32 ) + { + /* + * This can only happen if a programmer didn't read the limitations + * on the key size. + */ + Twofish_fatal( "Twofish_prepare_key: illegal key length", ERR_KEY_LEN ); + /* + * A return statement just in case the fatal macro returns. + * The rest of the code assumes that key_len is in range, and would + * buffer-overflow if it wasn't. + * + * Why do we still use a programming language that has problems like + * buffer overflows, when these problems were solved in 1960 with + * the development of Algol? Have we not leared anything? 
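+ *
+ * (Concretely: the local array K below holds only 32+32+4 bytes, laid out
+ * as the padded key, then the S vector, then the 4-byte RS overrun area,
+ * so an out-of-range key_len would corrupt that layout or write past the
+ * end of K in the memcpy() that follows.)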
+ */ + return ERR_KEY_LEN; + } + + /* Pad the key with zeroes to the next suitable key length. */ + memcpy( K, key, key_len ); + memset( K+key_len, 0, sizeof(K)-key_len ); + + /* + * Compute kCycles: the number of key cycles used in the cipher. + * 2 for 128-bit keys, 3 for 192-bit keys, and 4 for 256-bit keys. + */ + kCycles = (key_len + 7) >> 3; + /* Handle the special case of very short keys: minimum 2 cycles. */ + if( kCycles < 2 ) + { + kCycles = 2; + } + + /* + * From now on we just pretend to have 8*kCycles bytes of + * key material in K. This handles all the key size cases. + */ + + /* + * We first compute the 40 expanded key words, + * formulas straight from the Twofish specifications. + */ + for( i=0; i<40; i+=2 ) + { + /* + * Due to the byte spacing expected by the h() function + * we can pick the bytes directly from the key K. + * As we use bytes, we never have the little/big endian + * problem. + * + * Note that we apply the rotation function only to simple + * variables, as the rotation macro might evaluate its argument + * more than once. + */ + A = h( i , K , kCycles ); + B = h( i+1, K+4, kCycles ); + B = ROL32( B, 8 ); + + /* Compute and store the round keys. */ + A += B; + B += A; + xkey->K[i] = A; + xkey->K[i+1] = ROL32( B, 9 ); + } + + /* Wipe variables that contained key material. */ + A=B=0; + + /* + * And now the dreaded RS multiplication that few seem to understand. + * The RS matrix is not random, and is specially designed to compute the + * RS matrix multiplication in a simple way. + * + * We work in the field GF(2)[x]/x^8+x^6+x^3+x^2+1. Note that this is a + * different field than used for the MDS matrix. + * (At least, it is a different representation because all GF(2^8) + * representations are equivalent in some form.) + * + * We take 8 consecutive bytes of the key and interpret them as + * a polynomial k_0 + k_1 y + k_2 y^2 + ... + k_7 y^7 where + * the k_i bytes are the key bytes and are elements of the finite field. + * We multiply this polynomial by y^4 and reduce it modulo + * y^4 + (x + 1/x)y^3 + (x)y^2 + (x + 1/x)y + 1. + * using straightforward polynomial modulo reduction. + * The coefficients of the result are the result of the RS + * matrix multiplication. When we wrote the Twofish specification, + * the original RS definition used the polynomials, + * but that requires much more mathematical knowledge. + * We were already using matrix multiplication in a finite field for + * the MDS matrix, so I re-wrote the RS operation as a matrix + * multiplication to reduce the difficulty of understanding it. + * Some implementors have not picked up on this simpler method of + * computing the RS operation, even though it is mentioned in the + * specifications. + * + * It is possible to perform these computations faster by using 32-bit + * word operations, but that is not portable and this is not a speed- + * critical area. + * + * We explained the 1/x computation when we did the MDS matrix. + * + * The S vector is stored in K[32..64]. + * The S vector has to be reversed, so we loop cross-wise. + * + * Note the weird byte spacing of the S-vector, to match the even + * or odd key words arrays. See the discussion at the Hxx macros for + * details. + */ + kptr = K + 8*kCycles; /* Start at end of key */ + sptr = K + 32; /* Start at start of S */ + + /* Loop over all key material */ + while( kptr > K ) + { + kptr -= 8; + /* + * Initialise the polynimial in sptr[0..12] + * The first four coefficients are 0 as we have to multiply by y^4. 
+ * The next 8 coefficients are from the key material. + */ + memset( sptr, 0, 4 ); + memcpy( sptr+4, kptr, 8 ); + + /* + * The 12 bytes starting at sptr are now the coefficients of + * the polynomial we need to reduce. + */ + + /* Loop over the polynomial coefficients from high to low */ + t = sptr+11; + /* Keep looping until polynomial is degree 3; */ + while( t > sptr+3 ) + { + /* Pick up the highest coefficient of the poly. */ + b = *t; + + /* + * Compute x and (x+1/x) times this coefficient. + * See the MDS matrix implementation for a discussion of + * multiplication by x and 1/x. We just use different + * constants here as we are in a + * different finite field representation. + * + * These two statements set + * bx = (x) * b + * bxx= (x + 1/x) * b + */ + bx = (Twofish_Byte)((b<<1) ^ rs_poly_const[ b>>7 ]); + bxx= (Twofish_Byte)((b>>1) ^ rs_poly_div_const[ b&1 ] ^ bx); + + /* + * Subtract suitable multiple of + * y^4 + (x + 1/x)y^3 + (x)y^2 + (x + 1/x)y + 1 + * from the polynomial, except that we don't bother + * updating t[0] as it will become zero anyway. + */ + t[-1] ^= bxx; + t[-2] ^= bx; + t[-3] ^= bxx; + t[-4] ^= b; + + /* Go to the next coefficient. */ + t--; + } + + /* Go to next S-vector word, obeying the weird spacing rules. */ + sptr += 8; + } + + /* Wipe variables that contained key material. */ + b = bx = bxx = 0; + + /* And finally, we can compute the key-dependent S-boxes. */ + fill_keyed_sboxes( &K[32], kCycles, xkey ); + + /* Wipe array that contained key material. */ + (*memset_volatile)( K, 0, sizeof( K ) ); + return SUCCESS; + } + + +/* + * We can now start on the actual encryption and decryption code. + * As these are often speed-critical we will use a lot of macros. + */ + +/* + * The g() function is the heart of the round function. + * We have two versions of the g() function, one without an input + * rotation and one with. + * The pre-computed S-boxes make this pretty simple. + */ +#define g0(X,xkey) \ + (xkey->s[0][b0(X)]^xkey->s[1][b1(X)]^xkey->s[2][b2(X)]^xkey->s[3][b3(X)]) + +#define g1(X,xkey) \ + (xkey->s[0][b3(X)]^xkey->s[1][b0(X)]^xkey->s[2][b1(X)]^xkey->s[3][b2(X)]) + +/* + * A single round of Twofish. The A,B,C,D are the four state variables, + * T0 and T1 are temporaries, xkey is the expanded key, and r the + * round number. + * + * Note that this macro does not implement the swap at the end of the round. + */ +#define ENCRYPT_RND( A,B,C,D, T0, T1, xkey, r ) \ + T0 = g0(A,xkey); T1 = g1(B,xkey);\ + C ^= T0+T1+xkey->K[8+2*(r)]; C = ROR32(C,1);\ + D = ROL32(D,1); D ^= T0+2*T1+xkey->K[8+2*(r)+1] + +/* + * Encrypt a single cycle, consisting of two rounds. + * This avoids the swapping of the two halves. + * Parameter r is now the cycle number. + */ +#define ENCRYPT_CYCLE( A, B, C, D, T0, T1, xkey, r ) \ + ENCRYPT_RND( A,B,C,D,T0,T1,xkey,2*(r) );\ + ENCRYPT_RND( C,D,A,B,T0,T1,xkey,2*(r)+1 ) + +/* Full 16-round encryption */ +#define ENCRYPT( A,B,C,D,T0,T1,xkey ) \ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 0 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 1 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 2 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 3 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 4 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 5 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 6 );\ + ENCRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 7 ) + +/* + * A single round of Twofish for decryption. It differs from + * ENCRYTP_RND only because of the 1-bit rotations. 
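+ *
+ * Spelled out: ENCRYPT_RND xors the round key material into C and then
+ * rotates C right by one bit, and rotates D left by one bit before its
+ * xor; DECRYPT_RND inverts this by rotating C left before its xor and
+ * xorring D before rotating it right.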
+ */ +#define DECRYPT_RND( A,B,C,D, T0, T1, xkey, r ) \ + T0 = g0(A,xkey); T1 = g1(B,xkey);\ + C = ROL32(C,1); C ^= T0+T1+xkey->K[8+2*(r)];\ + D ^= T0+2*T1+xkey->K[8+2*(r)+1]; D = ROR32(D,1) + +/* + * Decrypt a single cycle, consisting of two rounds. + * This avoids the swapping of the two halves. + * Parameter r is now the cycle number. + */ +#define DECRYPT_CYCLE( A, B, C, D, T0, T1, xkey, r ) \ + DECRYPT_RND( A,B,C,D,T0,T1,xkey,2*(r)+1 );\ + DECRYPT_RND( C,D,A,B,T0,T1,xkey,2*(r) ) + +/* Full 16-round decryption. */ +#define DECRYPT( A,B,C,D,T0,T1, xkey ) \ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 7 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 6 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 5 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 4 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 3 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 2 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 1 );\ + DECRYPT_CYCLE( A,B,C,D,T0,T1,xkey, 0 ) + +/* + * A macro to read the state from the plaintext and do the initial key xors. + * The koff argument allows us to use the same macro + * for the decryption which uses different key words at the start. + */ +#define GET_INPUT( src, A,B,C,D, xkey, koff ) \ + A = GET32(src )^xkey->K[ koff]; B = GET32(src+ 4)^xkey->K[1+koff]; \ + C = GET32(src+ 8)^xkey->K[2+koff]; D = GET32(src+12)^xkey->K[3+koff] + +/* + * Similar macro to put the ciphertext in the output buffer. + * We xor the keys into the state variables before we use the PUT32 + * macro as the macro might use its argument multiple times. + */ +#define PUT_OUTPUT( A,B,C,D, dst, xkey, koff ) \ + A ^= xkey->K[ koff]; B ^= xkey->K[1+koff]; \ + C ^= xkey->K[2+koff]; D ^= xkey->K[3+koff]; \ + PUT32( A, dst ); PUT32( B, dst+ 4 ); \ + PUT32( C, dst+8 ); PUT32( D, dst+12 ) + + +/* + * Twofish block encryption + * + * Arguments: + * xkey expanded key array + * p 16 bytes of plaintext + * c 16 bytes in which to store the ciphertext + */ +void Twofish_encrypt( Twofish_key * xkey, Twofish_Byte p[16], Twofish_Byte c[16]) + { + Twofish_UInt32 A,B,C,D,T0,T1; /* Working variables */ + + /* Get the four plaintext words xorred with the key */ + GET_INPUT( p, A,B,C,D, xkey, 0 ); + + /* Do 8 cycles (= 16 rounds) */ + ENCRYPT( A,B,C,D,T0,T1,xkey ); + + /* Store them with the final swap and the output whitening. */ + PUT_OUTPUT( C,D,A,B, c, xkey, 4 ); + } + + +/* + * Twofish block decryption. + * + * Arguments: + * xkey expanded key array + * p 16 bytes of plaintext + * c 16 bytes in which to store the ciphertext + */ +void Twofish_decrypt( Twofish_key * xkey, Twofish_Byte c[16], Twofish_Byte p[16]) + { + Twofish_UInt32 A,B,C,D,T0,T1; /* Working variables */ + + /* Get the four plaintext words xorred with the key */ + GET_INPUT( c, A,B,C,D, xkey, 4 ); + + /* Do 8 cycles (= 16 rounds) */ + DECRYPT( A,B,C,D,T0,T1,xkey ); + + /* Store them with the final swap and the output whitening. */ + PUT_OUTPUT( C,D,A,B, p, xkey, 0 ); + } + +/* + * Using the macros it is easy to make special routines for + * CBC mode, CTR mode etc. The only thing you might want to + * add is a XOR_PUT_OUTPUT which xors the outputs into the + * destinationa instead of overwriting the data. This requires + * a XOR_PUT32 macro as well, but that should all be trivial. + * + * I thought about including routines for the separate cipher + * modes here, but it is unclear which modes should be included, + * and each encryption or decryption routine takes up a lot of code space. + * Also, I don't have any test vectors for any cipher modes + * with Twofish. 
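+ *
+ * As an illustration only (the helper below is not part of this file and
+ * the name Twofish_cbc_encrypt is made up for this sketch), a CBC
+ * encryptor can be built on the public API alone. Reusing the iv buffer
+ * as the chaining value and encrypting it in place is fine, because the
+ * implementation reads the whole input block before writing any output,
+ * just as the CFB code later in this patch relies on:
+ *
+ *     void Twofish_cbc_encrypt( Twofish_key * xkey, Twofish_Byte iv[16],
+ *                               Twofish_Byte * in, Twofish_Byte * out,
+ *                               size_t n_blocks )
+ *         {
+ *         size_t i, j;
+ *         for( i = 0; i < n_blocks; i++ )
+ *             {
+ *             for( j = 0; j < 16; j++ )
+ *                 {
+ *                 iv[j] ^= in[16*i + j];
+ *                 }
+ *             Twofish_encrypt( xkey, iv, iv );
+ *             memcpy( out + 16*i, iv, 16 );
+ *             }
+ *         }
+ *
+ * The matching decryptor needs one extra 16-byte temporary so that the
+ * ciphertext block is still available as the next chaining value after
+ * the plaintext has been recovered.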
+ */ + + diff --git a/libs/libks/crypt/twofish.h b/libs/libks/crypt/twofish.h new file mode 100755 index 0000000000..21d7e96341 --- /dev/null +++ b/libs/libks/crypt/twofish.h @@ -0,0 +1,265 @@ +/* + * Fast, portable, and easy-to-use Twofish implementation, + * Version 0.3. + * Copyright (c) 2002 by Niels Ferguson. + * + * See the twofish.c file for the details of the how and why of this code. + * + * The author hereby grants a perpetual license to everybody to + * use this code for any purpose as long as the copyright message is included + * in the source code of this or any derived work. + */ + + +/* + * PLATFORM FIXES + * ============== + * + * The following definitions have to be fixed for each particular platform + * you work on. If you have a multi-platform program, you no doubt have + * portable definitions that you can substitute here without changing + * the rest of the code. + * + * The defaults provided here should work on most PC compilers. + */ + +#ifndef TWOFISH_H +#define TWOFISH_H + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** + * @file twofish.h + * @brief Function that provide basic Twofish crypto support + * + * @ingroup GNU_ZRTP + * @{ + */ + +/** + * A Twofish_Byte must be an unsigned 8-bit integer. + * + * It must also be the elementary data size of your C platform, + * i.e. sizeof( Twofish_Byte ) == 1. + */ +typedef unsigned char Twofish_Byte; + +/** + * A Twofish_UInt32 must be an unsigned integer of at least 32 bits. + * + * This type is used only internally in the implementation, so ideally it + * would not appear in the header file, but it is used inside the + * Twofish_key structure which means it has to be included here. + */ +typedef unsigned int Twofish_UInt32; + + +/* + * END OF PLATFORM FIXES + * ===================== + * + * You should not have to touch the rest of this file, but the code + * in twofish.c has a few things you need to fix too. + */ + +/** + * Return codes + */ +#define SUCCESS 1 +#define ERR_UINT32 -2 +#define ERR_BYTE -3 +#define ERR_GET32 -4 +#define ERR_PUT32 -5 +#define ERR_ROLR -6 +#define ERR_BSWAP -7 +#define ERR_SELECTB -8 +#define ERR_TEST_ENC -9 +#define ERR_TEST_DEC -10 +#define ERR_SEQ_ENC -11 +#define ERR_SEQ_DEC -12 +#define ERR_ODD_KEY -13 +#define ERR_INIT -14 +#define ERR_KEY_LEN -15 +#define ERR_ILL_ARG -16 + + +/** + * Structure that contains a prepared Twofish key. + * + * A cipher key is used in two stages. In the first stage it is converted + * form the original form to an internal representation. + * This internal form is then used to encrypt and decrypt data. + * This structure contains the internal form. It is rather large: 4256 bytes + * on a platform with 32-bit unsigned values. + * + * Treat this as an opague structure, and don't try to manipulate the + * elements in it. I wish I could hide the inside of the structure, + * but C doesn't allow that. + */ +typedef + struct + { + Twofish_UInt32 s[4][256]; /* pre-computed S-boxes */ + Twofish_UInt32 K[40]; /* Round key words */ + } + Twofish_key; + + +/** + * Initialise and test the Twofish implementation. + * + * This function MUST be called before any other function in the + * Twofish implementation is called. + * It only needs to be called once. + * + * Apart from initialising the implementation it performs a self test. + * If the Twofish_fatal function is not called, the code passed the test. + * (See the twofish.c file for details on the Twofish_fatal function.) 
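+ *
+ * A typical call site (illustrative example only, not taken from this
+ * library) runs the check once at program start-up:
+ *
+ *     if( Twofish_initialise() < 0 )
+ *         {
+ *         ... refuse to continue, the self test failed ...
+ *         }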
+ * + * @returns a negative number if an error happend, +1 otherwise + */ +extern int Twofish_initialise(); + + +/** + * Convert a cipher key to the internal form used for + * encryption and decryption. + * + * The cipher key is an array of bytes; the Twofish_Byte type is + * defined above to a type suitable on your platform. + * + * Any key must be converted to an internal form in the Twofisk_key structure + * before it can be used. + * The encryption and decryption functions only work with the internal form. + * The conversion to internal form need only be done once for each key value. + * + * Be sure to wipe all key storage, including the Twofish_key structure, + * once you are done with the key data. + * A simple memset( TwofishKey, 0, sizeof( TwofishKey ) ) will do just fine. + * + * Unlike most implementations, this one allows any key size from 0 bytes + * to 32 bytes. According to the Twofish specifications, + * irregular key sizes are handled by padding the key with zeroes at the end + * until the key size is 16, 24, or 32 bytes, whichever + * comes first. Note that each key of irregular size is equivalent to exactly + * one key of 16, 24, or 32 bytes. + * + * WARNING: Short keys have low entropy, and result in low security. + * Anything less than 8 bytes is utterly insecure. For good security + * use at least 16 bytes. I prefer to use 32-byte keys to prevent + * any collision attacks on the key. + * + * The key length argument key_len must be in the proper range. + * If key_len is not in the range 0,...,32 this routine attempts to generate + * a fatal error (depending on the code environment), + * and at best (or worst) returns without having done anything. + * + * @param key Array of key bytes + * @param key_len Number of key bytes, must be in the range 0,1,...,32. + * @param xkey Pointer to an Twofish_key structure that will be filled + * with the internal form of the cipher key. + * @returns a negative number if an error happend, +1 otherwise + */ +extern int Twofish_prepare_key( + Twofish_Byte key[], + int key_len, + Twofish_key * xkey + ); + + +/** + * Encrypt a single block of data. + * + * This function encrypts a single block of 16 bytes of data. + * If you want to encrypt a larger or variable-length message, + * you will have to use a cipher mode, such as CBC or CTR. + * These are outside the scope of this implementation. + * + * The xkey structure is not modified by this routine, and can be + * used for further encryption and decryption operations. + * + * @param xkey pointer to Twofish_key, internal form of the key + * produces by Twofish_prepare_key() + * @param p Plaintext to be encrypted + * @param c Place to store the ciphertext + */ +extern void Twofish_encrypt( + Twofish_key * xkey, + Twofish_Byte p[16], + Twofish_Byte c[16] + ); + + +/** + * Decrypt a single block of data. + * + * This function decrypts a single block of 16 bytes of data. + * If you want to decrypt a larger or variable-length message, + * you will have to use a cipher mode, such as CBC or CTR. + * These are outside the scope of this implementation. + * + * The xkey structure is not modified by this routine, and can be + * used for further encryption and decryption operations. 
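+ *
+ * For example (illustrative only), with xkey prepared by
+ * Twofish_prepare_key(), decryption reverses an earlier encryption made
+ * with the same key:
+ *
+ *     Twofish_encrypt( &xkey, plain, cipher );
+ *     Twofish_decrypt( &xkey, cipher, recovered );
+ *     ... recovered now holds the original 16 plaintext bytes ...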
+ * + * @param xkey pointer to Twofish_key, internal form of the key + * produces by Twofish_prepare_key() + * @param c Ciphertext to be decrypted + * @param p Place to store the plaintext + */ +extern void Twofish_decrypt( + Twofish_key * xkey, + Twofish_Byte c[16], + Twofish_Byte p[16] + ); + + +/** + * Encrypt data in CFB mode. + * + * This function encrypts data in CFB mode. + * + * The key structure is not modified by this routine, and can be + * used for further encryption and decryption operations. + * + * @param keyCtx pointer to Twofish_key, internal form of the key + * produced by Twofish_prepare_key() + * @param in Plaintext to be encrypted + * @param out Place to store the ciphertext + * @param len number of bytes to encrypt. + * @param ivec initialization vector for this CFB mode encryption. + * @param num pointer to integer that holds number of available crypto bytes. + */ +void Twofish_cfb128_encrypt(Twofish_key* keyCtx, Twofish_Byte* in, + Twofish_Byte* out, size_t len, + Twofish_Byte* ivec, int *num); + +/** + * Decrypt data in CFB mode. + * + * This function decrypts data in CFB. + * + * The key structure is not modified by this routine, and can be + * used for further encryption and decryption operations. + * + * @param keyCtx pointer to Twofish_key, internal form of the key + * produced by Twofish_prepare_key() + * @param in Ciphertext to be decrypted + * @param out Place to store the plaintext + * @param len number of bytes to decrypt. + * @param ivec initialization vector for this CFB mode encryption. + * @param num pointer to integer that holds number of available crypto bytes. + */ +void Twofish_cfb128_decrypt(Twofish_key* keyCtx, Twofish_Byte* in, + Twofish_Byte* out, size_t len, + Twofish_Byte* ivec, int *num); +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#endif diff --git a/libs/libks/crypt/twofish_cfb.c b/libs/libks/crypt/twofish_cfb.c new file mode 100755 index 0000000000..8ade853539 --- /dev/null +++ b/libs/libks/crypt/twofish_cfb.c @@ -0,0 +1,98 @@ +#include +#include + +#include "twofish.h" + +#ifdef ANDROID +void Two_debugDummy(Twofish_Byte* in, Twofish_Byte* out, Twofish_Byte* ivec); +#endif + +void Twofish_cfb128_encrypt(Twofish_key* keyCtx, Twofish_Byte* in, + Twofish_Byte* out, size_t len, + Twofish_Byte* ivec, int32_t *num) +{ + uint32_t n; + + n = *num; + + do { + while (n && len) { + *(out++) = ivec[n] ^= *(in++); + --len; + n = (n+1) % 16; + } + while (len>=16) { + Twofish_encrypt(keyCtx, ivec, ivec); + for (n=0; n<16; n+=sizeof(size_t)) { + +/* + * Some GCC version(s) of Android's NDK produce code that leads to a crash (SIGBUS). The + * offending line if the line that produces the output by xor'ing the ivec. Somehow the + * compiler/optimizer seems to incorrectly setup the pointers. Adding a call to an + * external function that uses the pointer disabled or modifies this optimzing + * behaviour. This debug functions as such does nothing, it just disables some + * optimization. Don't use a local (static) function - the compiler sees that it does + * nothing and optimizes again :-) . 
+ */ +#ifdef ANDROID + Two_debugDummy(in, out, ivec); +#endif + *(size_t*)(out+n) = *(size_t*)(ivec+n) ^= *(size_t*)(in+n);; + } + len -= 16; + out += 16; + in += 16; + } + n = 0; + if (len) { + Twofish_encrypt(keyCtx, ivec, ivec); + while (len--) { + out[n] = ivec[n] ^= in[n]; + ++n; + } + } + *num = n; + return; + } while (0); +} + + +void Twofish_cfb128_decrypt(Twofish_key* keyCtx, Twofish_Byte* in, + Twofish_Byte* out, size_t len, + Twofish_Byte* ivec, int32_t *num) +{ + uint32_t n; + + n = *num; + + do { + while (n && len) { + unsigned char c; + *(out++) = ivec[n] ^ (c = *(in++)); ivec[n] = c; + --len; + n = (n+1) % 16; + } + while (len>=16) { + Twofish_encrypt(keyCtx, ivec, ivec); + for (n=0; n<16; n+=sizeof(size_t)) { + size_t t = *(size_t*)(in+n); + *(size_t*)(out+n) = *(size_t*)(ivec+n) ^ t; + *(size_t*)(ivec+n) = t; + } + len -= 16; + out += 16; + in += 16; + } + n = 0; + if (len) { + Twofish_encrypt(keyCtx, ivec, ivec); + while (len--) { + unsigned char c; + out[n] = ivec[n] ^ (c = in[n]); ivec[n] = c; + ++n; + } + } + *num = n; + return; + } while (0); +} diff --git a/libs/libks/dht_bencode_encoder.diff b/libs/libks/dht_bencode_encoder.diff new file mode 100644 index 0000000000..65dd0f7221 --- /dev/null +++ b/libs/libks/dht_bencode_encoder.diff @@ -0,0 +1,407 @@ +diff --git a/libs/libks/src/ks_dht.c b/libs/libks/src/ks_dht.c +index 27bafd0..126686e 100644 +--- a/libs/libks/src/ks_dht.c ++++ b/libs/libks/src/ks_dht.c +@@ -2438,96 +2438,84 @@ static int dht_send(dht_handle_t *h, const void *buf, size_t len, int flags, con + return sendto(s, buf, len, flags, sa, salen); + } + ++/* Sample ping packet '{"t":"aa", "y":"q", "q":"ping", "a":{"id":"abcdefghij0123456789"}}' */ ++/* http://www.bittorrent.org/beps/bep_0005.html */ + int send_ping(dht_handle_t *h, const struct sockaddr *sa, int salen, const unsigned char *tid, int tid_len) + { + char buf[512]; +- int i = 0;//, rc; +- struct bencode *bencode_p = NULL; +- struct bencode *bencode_a_p = NULL; +- +- /* Sets some default values for message, then encodes 20 characters worth of local node id */ +- /* also adds the transaction id tid, then a few final key values. 
*/ +- +- /* Sample encoded ping 'd1:ad2:id20:Td2????#?)y1:q4:ping1:t4:pn' */ +- /* 'd1:ad2:id20:Td2????#?)y1:q4:ping1:t4:pn' */ +- /* https://en.wikipedia.org/wiki/Bencode */ +- /* Sample ping packet '{"t":"aa", "y":"q", "q":"ping", "a":{"id":"abcdefghij0123456789"}}' */ +- /* http://www.bittorrent.org/beps/bep_0005.html */ ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); + +- bencode_a_p = ben_dict(); /* Initialize empty bencode dictionary */ ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); ++ ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("ping", 4)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); +- +- bencode_p = ben_dict(); + ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); +- ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("ping", 4)); +- ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); +- ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); + +- /* +- rc = ks_snprintf(buf + i, 512 - i, "d1:ad2:id20:"); INC(i, rc, 512); +- COPY(buf, i, h->myid, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "e1:q4:ping1:t%d:", tid_len); +- INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:qe"); INC(i, rc, 512); +- */ + ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + ks_log(KS_LOG_DEBUG, "Encoded PING: %s\n\n", buf); + return dht_send(h, buf, i, 0, sa, salen); +- +- /* +- // Need to fix, not just disable error handling. +- fail: +- errno = ENOSPC; +- return -1; */ + } + ++/* Sample pong packet '{"t":"aa", "y":"r", "r": {"id":"mnopqrstuvwxyz123456"}}' */ ++/* http://www.bittorrent.org/beps/bep_0005.html */ + int send_pong(dht_handle_t *h, const struct sockaddr *sa, int salen, const unsigned char *tid, int tid_len) + { + char buf[512]; +- int i = 0, rc; +- rc = ks_snprintf(buf + i, 512 - i, "d1:rd2:id20:"); INC(i, rc, 512); +- COPY(buf, i, h->myid, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "e1:t%d:", tid_len); INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:re"); INC(i, rc, 512); +- return dht_send(h, buf, i, 0, sa, salen); ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); + +- fail: +- errno = ENOSPC; +- return -1; ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("r", 1)); ++ ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); ++ ben_dict_set(bencode_p, ben_blob("r", 1), bencode_a_p); ++ ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ ++ ++ ks_log(KS_LOG_DEBUG, "Encoded PONG: %s\n\n", buf); ++ return dht_send(h, buf, i, 0, sa, salen); + } + ++/* Sample find_node packet '{"t":"aa", "y":"q", "q":"find_node", "a": {"id":"abcdefghij0123456789", "target":"mnopqrstuvwxyz123456"}}' */ ++/* Sample find_node packet w/ want '{"t":"aa", "y":"q", "q":"find_node", "a": {"id":"abcdefghij0123456789", "target":"mnopqrstuvwxyz123456", "want":"n4"}}' */ ++/* http://www.bittorrent.org/beps/bep_0005.html */ ++/* http://www.bittorrent.org/beps/bep_0032.html for want parameter */ + int send_find_node(dht_handle_t *h, const struct sockaddr *sa, int salen, + const unsigned char *tid, int tid_len, + const unsigned char 
*target, int want, int confirm) + { + char buf[512]; +- int i = 0, rc; +- rc = ks_snprintf(buf + i, 512 - i, "d1:ad2:id20:"); INC(i, rc, 512); +- COPY(buf, i, h->myid, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "6:target20:"); INC(i, rc, 512); +- COPY(buf, i, target, 20, 512); +- if (want > 0) { +- rc = ks_snprintf(buf + i, 512 - i, "4:wantl%s%se", (want & WANT4) ? "2:n4" : "", (want & WANT6) ? "2:n6" : ""); +- INC(i, rc, 512); +- } +- rc = ks_snprintf(buf + i, 512 - i, "e1:q9:find_node1:t%d:", tid_len); +- INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:qe"); INC(i, rc, 512); +- return dht_send(h, buf, i, confirm ? MSG_CONFIRM : 0, sa, salen); ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); ++ int target_len = target ? strlen((const char*)target) : 0; + +- fail: +- errno = ENOSPC; +- return -1; +-} ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); ++ ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("find_node", 9)); ++ ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); ++ if (target) ben_dict_set(bencode_a_p, ben_blob("target", 6), ben_blob(target, target_len)); ++ if (want > 0) { ++ char *w = NULL; ++ if (want & WANT4) w = "n4"; ++ if (want & WANT6) w = "n6"; ++ if (w) ben_dict_set(bencode_a_p, ben_blob("want", 4), ben_blob(w, 2)); ++ } ++ ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); + ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ ++ ++ ks_log(KS_LOG_DEBUG, "Encoded FIND_NODE: %s\n\n", buf); ++ return dht_send(h, buf, i, confirm ? MSG_CONFIRM : 0, sa, salen); ++} ++/* sample find_node response '{"t":"aa", "y":"r", "r": {"id":"0123456789abcdefghij", "nodes": "def456..."}}'*/ ++/* http://www.bittorrent.org/beps/bep_0005.html */ + int send_nodes_peers(dht_handle_t *h, const struct sockaddr *sa, int salen, + const unsigned char *tid, int tid_len, + const unsigned char *nodes, int nodes_len, +@@ -2536,30 +2524,28 @@ int send_nodes_peers(dht_handle_t *h, const struct sockaddr *sa, int salen, + const unsigned char *token, int token_len) + { + char buf[2048]; +- int i = 0, rc, j0, j, k, len; ++ int i = 0;//, rc, j0, j, k, len; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); ++ struct bencode *ben_array = ben_list(); + +- rc = ks_snprintf(buf + i, 2048 - i, "d1:rd2:id20:"); INC(i, rc, 2048); +- COPY(buf, i, h->myid, 20, 2048); +- if (nodes_len > 0) { +- rc = ks_snprintf(buf + i, 2048 - i, "5:nodes%d:", nodes_len); +- INC(i, rc, 2048); +- COPY(buf, i, nodes, nodes_len, 2048); +- } +- if (nodes6_len > 0) { +- rc = ks_snprintf(buf + i, 2048 - i, "6:nodes6%d:", nodes6_len); +- INC(i, rc, 2048); +- COPY(buf, i, nodes6, nodes6_len, 2048); +- } +- if (token_len > 0) { +- rc = ks_snprintf(buf + i, 2048 - i, "5:token%d:", token_len); +- INC(i, rc, 2048); +- COPY(buf, i, token, token_len, 2048); +- } ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("r", 1)); ++ ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); ++ if (token_len) ben_dict_set(bencode_a_p, ben_blob("token", 5), ben_blob(token, token_len)); ++ if (nodes_len) ben_dict_set(bencode_a_p, ben_blob("nodes", 5), ben_blob(token, nodes_len)); ++ if (nodes6_len) ben_dict_set(bencode_a_p, ben_blob("nodes6", 6), 
ben_blob(token, nodes6_len)); + ++ /* its an array, how do i do this?? ++ ++Response with peers = {"t":"aa", "y":"r", "r": {"id":"abcdefghij0123456789", "token":"aoeusnth", "values": ["axje.u", "idhtnm"]}} ++ */ ++ ++ /* TODO XXXXXX find docs and add "values" stuff into this encode + if (st && st->numpeers > 0) { +- /* We treat the storage as a circular list, and serve a randomly +- chosen slice. In order to make sure we fit within 1024 octets, +- we limit ourselves to 50 peers. */ ++ // We treat the storage as a circular list, and serve a randomly ++ // chosen slice. In order to make sure we fit within 1024 octets, ++ // we limit ourselves to 50 peers. + + len = af == AF_INET ? 4 : 16; + j0 = random() % st->numpeers; +@@ -2582,19 +2568,13 @@ int send_nodes_peers(dht_handle_t *h, const struct sockaddr *sa, int salen, + rc = ks_snprintf(buf + i, 2048 - i, "e"); + INC(i, rc, 2048); + } +- +- rc = ks_snprintf(buf + i, 2048 - i, "e1:t%d:", tid_len); +- INC(i, rc, 2048); +- COPY(buf, i, tid, tid_len, 2048); +- ADD_V(buf, i, 2048); +- rc = ks_snprintf(buf + i, 2048 - i, "1:y1:re"); +- INC(i, rc, 2048); +- +- return dht_send(h, buf, i, 0, sa, salen); +- +- fail: +- errno = ENOSPC; +- return -1; ++*/ ++ ben_dict_set(bencode_p, ben_blob("r", 1), bencode_a_p); ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ ++ ++ ks_log(KS_LOG_DEBUG, "Encoded FIND_NODE: %s\n\n", buf); ++ return dht_send(h, buf, i, 0, sa, salen); + } + + static int insert_closest_node(unsigned char *nodes, int numnodes, +@@ -2706,104 +2686,107 @@ int send_closest_nodes(dht_handle_t *h, const struct sockaddr *sa, int salen, + af, st, token, token_len); + } + ++/* sample get_peers request '{"t":"aa", "y":"q", "q":"get_peers", "a": {"id":"abcdefghij0123456789", "info_hash":"mnopqrstuvwxyz123456"}}'*/ ++/* sample get_peers w/ want '{"t":"aa", "y":"q", "q":"get_peers", "a": {"id":"abcdefghij0123456789", "info_hash":"mnopqrstuvwxyz123456": "want":"n4"}}'*/ ++/* http://www.bittorrent.org/beps/bep_0005.html */ ++/* http://www.bittorrent.org/beps/bep_0032.html for want parameter */ + int send_get_peers(dht_handle_t *h, const struct sockaddr *sa, int salen, + unsigned char *tid, int tid_len, unsigned char *infohash, + int want, int confirm) + { + char buf[512]; +- int i = 0, rc; +- +- rc = ks_snprintf(buf + i, 512 - i, "d1:ad2:id20:"); INC(i, rc, 512); +- COPY(buf, i, h->myid, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "9:info_hash20:"); INC(i, rc, 512); +- COPY(buf, i, infohash, 20, 512); +- if (want > 0) { +- rc = ks_snprintf(buf + i, 512 - i, "4:wantl%s%se", (want & WANT4) ? "2:n4" : "", (want & WANT6) ? "2:n6" : ""); +- INC(i, rc, 512); +- } +- rc = ks_snprintf(buf + i, 512 - i, "e1:q9:get_peers1:t%d:", tid_len); +- INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:qe"); INC(i, rc, 512); +- return dht_send(h, buf, i, confirm ? MSG_CONFIRM : 0, sa, salen); ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); ++ int infohash_len = infohash ? 
strlen((const char*)infohash) : 0; + +- fail: +- errno = ENOSPC; +- return -1; +-} ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); ++ ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("get_peers", 9)); ++ ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); ++ if (want > 0) { ++ char *w = NULL; ++ if (want & WANT4) w = "n4"; ++ if (want & WANT6) w = "n6"; ++ if (w) ben_dict_set(bencode_a_p, ben_blob("want", 4), ben_blob(w, 2)); ++ } ++ ben_dict_set(bencode_a_p, ben_blob("info_hash", 9), ben_blob(infohash, infohash_len)); ++ ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); + ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ ++ ++ ks_log(KS_LOG_DEBUG, "Encoded GET_PEERS: %s\n\n", buf); ++ return dht_send(h, buf, i, confirm ? MSG_CONFIRM : 0, sa, salen); ++} ++/* '{"t":"aa", "y":"q", "q":"announce_peer", "a": {"id":"abcdefghij0123456789", "implied_port": 1, "info_hash":"mnopqrstuvwxyz123456", "port": 6881, "token": "aoeusnth"}}'*/ + int send_announce_peer(dht_handle_t *h, const struct sockaddr *sa, int salen, + unsigned char *tid, int tid_len, + unsigned char *infohash, unsigned short port, + unsigned char *token, int token_len, int confirm) + { + char buf[512]; +- int i = 0, rc; +- +- rc = ks_snprintf(buf + i, 512 - i, "d1:ad2:id20:"); INC(i, rc, 512); +- COPY(buf, i, h->myid, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "9:info_hash20:"); INC(i, rc, 512); +- COPY(buf, i, infohash, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "4:porti%ue5:token%d:", (unsigned)port, token_len); +- INC(i, rc, 512); +- COPY(buf, i, token, token_len, 512); +- rc = ks_snprintf(buf + i, 512 - i, "e1:q13:announce_peer1:t%d:", tid_len); +- INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:qe"); INC(i, rc, 512); ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); ++ int infohash_len = infohash ? strlen((const char*)infohash) : 0; + +- return dht_send(h, buf, i, confirm ? MSG_CONFIRM : 0, sa, salen); ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); ++ ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("announce_peer", 13)); ++ ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); ++ ben_dict_set(bencode_a_p, ben_blob("info_hash", 9), ben_blob(infohash, infohash_len)); ++ ben_dict_set(bencode_a_p, ben_blob("port", 5), ben_int(port)); ++ ben_dict_set(bencode_a_p, ben_blob("token", 5), ben_blob(token, token_len)); ++ ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); + +- fail: +- errno = ENOSPC; +- return -1; ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ ++ ++ ks_log(KS_LOG_DEBUG, "Encoded ANNOUNCE_PEERS: %s\n\n", buf); ++ return dht_send(h, buf, i, confirm ? 
MSG_CONFIRM : 0, sa, salen); + } +- ++/* '{"t":"aa", "y":"r", "r": {"id":"mnopqrstuvwxyz123456"}}'*/ + static int send_peer_announced(dht_handle_t *h, const struct sockaddr *sa, int salen, unsigned char *tid, int tid_len) + { + char buf[512]; +- int i = 0, rc; +- +- rc = ks_snprintf(buf + i, 512 - i, "d1:rd2:id20:"); +- INC(i, rc, 512); +- COPY(buf, i, h->myid, 20, 512); +- rc = ks_snprintf(buf + i, 512 - i, "e1:t%d:", tid_len); +- INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:re"); +- INC(i, rc, 512); +- return dht_send(h, buf, i, 0, sa, salen); ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *bencode_a_p = ben_dict(); + +- fail: +- errno = ENOSPC; +- return -1; ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("r", 1)); ++ ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); ++ ben_dict_set(bencode_p, ben_blob("r", 1), bencode_a_p); ++ ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ ++ ++ ks_log(KS_LOG_DEBUG, "Encoded peer_announced: %s\n\n", buf); ++ return dht_send(h, buf, i, 0, sa, salen); + } + ++/* '{"t":"aa", "y":"e", "e":[201, "A Generic Error Ocurred"]}'*/ + static int send_error(dht_handle_t *h, const struct sockaddr *sa, int salen, + unsigned char *tid, int tid_len, + int code, const char *message) + { + char buf[512]; +- int i = 0, rc, message_len; +- +- message_len = strlen(message); +- rc = ks_snprintf(buf + i, 512 - i, "d1:eli%de%d:", code, message_len); +- INC(i, rc, 512); +- COPY(buf, i, message, message_len, 512); +- rc = ks_snprintf(buf + i, 512 - i, "e1:t%d:", tid_len); +- INC(i, rc, 512); +- COPY(buf, i, tid, tid_len, 512); +- ADD_V(buf, i, 512); +- rc = ks_snprintf(buf + i, 512 - i, "1:y1:ee"); +- INC(i, rc, 512); +- return dht_send(h, buf, i, 0, sa, salen); ++ int i = 0; ++ struct bencode *bencode_p = ben_dict(); ++ struct bencode *ben_array = ben_list(); + +- fail: +- errno = ENOSPC; +- return -1; ++ ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); ++ ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("e", 1)); ++ ben_list_append(ben_array, ben_int(code)); ++ ben_list_append(ben_array, ben_blob(message, strlen(message))); ++ ben_dict_set(bencode_p, ben_blob("e", 1), ben_array); ++ ++ ben_encode2(buf, 512, bencode_p); ++ ben_free(bencode_p); ++ ++ ks_log(KS_LOG_DEBUG, "Encoded error: %s\n\n", buf); ++ return dht_send(h, buf, i, 0, sa, salen); + } + + #undef CHECK diff --git a/libs/libks/libks.pc.in b/libs/libks/libks.pc.in new file mode 100644 index 0000000000..6d53390d4f --- /dev/null +++ b/libs/libks/libks.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Version: @PACKAGE_VERSION@ +Description: A cross platform kitchen sink library. 
+ +Cflags: -I${includedir} +Libs: -L${libdir} -lks diff --git a/libs/libks/libks.props b/libs/libks/libks.props index 35b74ad4dd..b22cb88d24 100644 --- a/libs/libks/libks.props +++ b/libs/libks/libks.props @@ -5,9 +5,9 @@ - SIMCLIST_NO_DUMPRESTORE;_CRT_SECURE_NO_WARNINGS;KS_EXPORTS;%(PreprocessorDefinitions) + UNICODE;SIMCLIST_NO_DUMPRESTORE;_CRT_SECURE_NO_WARNINGS;KS_EXPORTS;%(PreprocessorDefinitions) $(ProjectDir)\src\include;$(ProjectDir)\src\win\sys;$(ProjectDir)\src\win;%(AdditionalIncludeDirectories) - 4574;4100;4127;4668;4255;4706;4710;4820 + 4711;4574;4100;4127;4668;4255;4706;4710;4820 diff --git a/libs/libks/libks.sln b/libs/libks/libks.sln index fe03d30e74..e0c1b4869f 100644 --- a/libs/libks/libks.sln +++ b/libs/libks/libks.sln @@ -5,10 +5,17 @@ VisualStudioVersion = 14.0.23107.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libks", "libks.vcxproj", "{70D178D8-1100-4152-86C0-809A91CFF832}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testpools", "test\testpools\testpools.vcxproj", "{766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}" - ProjectSection(ProjectDependencies) = postProject - {70D178D8-1100-4152-86C0-809A91CFF832} = {70D178D8-1100-4152-86C0-809A91CFF832} - EndProjectSection +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testpools", "test\testpools.vcxproj", "{5825A3B2-31A0-475A-AF32-44FB0D8B52D4}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testthreadmutex", "test\testthreadmutex.vcxproj", "{AE572500-7266-4692-ACA4-5E37B7B4409A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testhash", "test\testhash.vcxproj", "{43724CF4-FCE1-44FE-AB36-C86E3979B350}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testq", "test\testq.vcxproj", "{3F8E0DF3-F402-40E0-8D78-44A094625D25}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testsock", "test\testsock.vcxproj", "{5DC38E2B-0512-4140-8A1B-59952A5DC9CB}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "testtime", "test\testtime.vcxproj", "{B74812A1-C67D-4568-AF84-26CE2004D8BF}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -26,14 +33,54 @@ Global {70D178D8-1100-4152-86C0-809A91CFF832}.Release|x64.Build.0 = Release|x64 {70D178D8-1100-4152-86C0-809A91CFF832}.Release|x86.ActiveCfg = Release|Win32 {70D178D8-1100-4152-86C0-809A91CFF832}.Release|x86.Build.0 = Release|Win32 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Debug|x64.ActiveCfg = Debug|x64 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Debug|x64.Build.0 = Debug|x64 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Debug|x86.ActiveCfg = Debug|Win32 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Debug|x86.Build.0 = Debug|Win32 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Release|x64.ActiveCfg = Release|x64 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Release|x64.Build.0 = Release|x64 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Release|x86.ActiveCfg = Release|Win32 - {766F7FF4-CF39-4CDF-ABDC-4E9C88568F1F}.Release|x86.Build.0 = Release|Win32 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Debug|x64.ActiveCfg = Debug|x64 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Debug|x64.Build.0 = Debug|x64 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Debug|x86.ActiveCfg = Debug|Win32 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Debug|x86.Build.0 = Debug|Win32 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Release|x64.ActiveCfg = Release|x64 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Release|x64.Build.0 = Release|x64 + 
{5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Release|x86.ActiveCfg = Release|Win32 + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4}.Release|x86.Build.0 = Release|Win32 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Debug|x64.ActiveCfg = Debug|x64 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Debug|x64.Build.0 = Debug|x64 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Debug|x86.ActiveCfg = Debug|Win32 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Debug|x86.Build.0 = Debug|Win32 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Release|x64.ActiveCfg = Release|x64 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Release|x64.Build.0 = Release|x64 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Release|x86.ActiveCfg = Release|Win32 + {AE572500-7266-4692-ACA4-5E37B7B4409A}.Release|x86.Build.0 = Release|Win32 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Debug|x64.ActiveCfg = Debug|x64 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Debug|x64.Build.0 = Debug|x64 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Debug|x86.ActiveCfg = Debug|Win32 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Debug|x86.Build.0 = Debug|Win32 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Release|x64.ActiveCfg = Release|x64 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Release|x64.Build.0 = Release|x64 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Release|x86.ActiveCfg = Release|Win32 + {43724CF4-FCE1-44FE-AB36-C86E3979B350}.Release|x86.Build.0 = Release|Win32 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Debug|x64.ActiveCfg = Debug|x64 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Debug|x64.Build.0 = Debug|x64 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Debug|x86.ActiveCfg = Debug|Win32 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Debug|x86.Build.0 = Debug|Win32 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Release|x64.ActiveCfg = Release|x64 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Release|x64.Build.0 = Release|x64 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Release|x86.ActiveCfg = Release|Win32 + {3F8E0DF3-F402-40E0-8D78-44A094625D25}.Release|x86.Build.0 = Release|Win32 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Debug|x64.ActiveCfg = Debug|x64 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Debug|x64.Build.0 = Debug|x64 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Debug|x86.ActiveCfg = Debug|Win32 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Debug|x86.Build.0 = Debug|Win32 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Release|x64.ActiveCfg = Release|x64 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Release|x64.Build.0 = Release|x64 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Release|x86.ActiveCfg = Release|Win32 + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB}.Release|x86.Build.0 = Release|Win32 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Debug|x64.ActiveCfg = Debug|x64 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Debug|x64.Build.0 = Debug|x64 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Debug|x86.ActiveCfg = Debug|Win32 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Debug|x86.Build.0 = Debug|Win32 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Release|x64.ActiveCfg = Release|x64 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Release|x64.Build.0 = Release|x64 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Release|x86.ActiveCfg = Release|Win32 + {B74812A1-C67D-4568-AF84-26CE2004D8BF}.Release|x86.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/libs/libks/libks.vcxproj b/libs/libks/libks.vcxproj index 1a40dd2f02..5a82f7d0b5 100644 --- a/libs/libks/libks.vcxproj +++ b/libs/libks/libks.vcxproj @@ -27,22 +27,26 @@ DynamicLibrary true - v140 + v140_xp + Unicode DynamicLibrary false - v140 + v140_xp + Unicode DynamicLibrary true - v140 + 
v140_xp + Unicode DynamicLibrary false - v140 + v140_xp + Unicode @@ -72,7 +76,8 @@ $(SolutionDir)$(Platform)\$(Configuration)\ - true + + $(Platform)\$(Configuration)\ $(SolutionDir)$(Platform)\$(Configuration)\ @@ -84,6 +89,8 @@ ProgramDatabase Disabled %(AdditionalIncludeDirectories) + true + true MachineX86 @@ -98,6 +105,8 @@ EnableAllWarnings ProgramDatabase %(AdditionalIncludeDirectories) + true + true MachineX86 @@ -111,31 +120,49 @@ %(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBKS_EXPORTS;%(PreprocessorDefinitions) - ProgramDatabase + EditAndContinue EnableAllWarnings + true + true + + Windows + Debug + %(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBKS_EXPORTS;%(PreprocessorDefinitions) EnableAllWarnings + true + true + + Windows + + + - - + + + + + + + + + + - - - @@ -143,14 +170,12 @@ - - + + - - - \ No newline at end of file + diff --git a/libs/libks/libks.vcxproj.filters b/libs/libks/libks.vcxproj.filters index 3b2d013492..0e995bcf93 100644 --- a/libs/libks/libks.vcxproj.filters +++ b/libs/libks/libks.vcxproj.filters @@ -18,6 +18,12 @@ Source Files + + Source Files + + + Source Files + Source Files @@ -27,29 +33,44 @@ Source Files - + Source Files - + Source Files Source Files - - Source Files - - - Source Files - Source Files + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + - - Header Files - Header Files @@ -65,10 +86,10 @@ Header Files - + Header Files - + Header Files @@ -90,4 +111,4 @@ Header Files - \ No newline at end of file + diff --git a/libs/libks/src/bencode.c b/libs/libks/src/bencode.c new file mode 100644 index 0000000000..390c63bc29 --- /dev/null +++ b/libs/libks/src/bencode.c @@ -0,0 +1,2683 @@ +/* + * libbencodetools + * + * Written by Heikki Orsila and + * Janne Kulmala in 2011. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define die(fmt, args...) do { fprintf(stderr, "bencode: fatal error: " fmt, ## args); abort(); } while (0) +#define warn(fmt, args...) do { fprintf(stderr, "bencode: warning: " fmt, ## args); } while (0) + +#define MAX_ALLOC (((size_t) -1) / sizeof(struct bencode *) / 2) +#define DICT_MAX_ALLOC (((size_t) -1) / sizeof(struct bencode_dict_node) / 2) + +struct ben_decode_ctx { + const char *data; + const size_t len; + size_t off; + int error; + int level; + char c; + int line; + struct bencode_type **types; +}; + +struct ben_encode_ctx { + char *data; + size_t size; + size_t pos; +}; + +/* + * Buffer size for fitting all unsigned long long and long long integers, + * assuming it is at most 64 bits. If long long is larger than 64 bits, + * an error is produced when too large an integer is converted. 
+ */ +#define LONGLONGSIZE 21 + +static struct bencode *decode_printed(struct ben_decode_ctx *ctx); +static void inplace_ben_str(struct bencode_str *b, const char *s, size_t len); +static int resize_dict(struct bencode_dict *d, size_t newalloc); +static int resize_list(struct bencode_list *list, size_t newalloc); +static int unpack(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl); +static struct bencode *pack(struct ben_decode_ctx *ctx, va_list *vl); + +static size_t type_size(int type) +{ + switch (type) { + case BENCODE_BOOL: + return sizeof(struct bencode_bool); + case BENCODE_DICT: + return sizeof(struct bencode_dict); + case BENCODE_INT: + return sizeof(struct bencode_int); + case BENCODE_LIST: + return sizeof(struct bencode_list); + case BENCODE_STR: + return sizeof(struct bencode_str); + default: + die("Unknown type: %d\n", type); + } +} + +static void *alloc(int type) +{ + struct bencode *b = calloc(1, type_size(type)); + if (b == NULL) + return NULL; + b->type = type; + return b; +} + +void *ben_alloc_user(struct bencode_type *type) +{ + struct bencode_user *user = calloc(1, type->size); + if (user == NULL) + return NULL; + user->type = BENCODE_USER; + user->info = type; + return user; +} + +static int insufficient(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INSUFFICIENT; + return -1; +} + +static int invalid(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INVALID; + return -1; +} + +static int mismatch(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_MISMATCH; + return -1; +} + +void *ben_insufficient_ptr(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INSUFFICIENT; + return NULL; +} + +void *ben_invalid_ptr(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INVALID; + return NULL; +} + +void *ben_oom_ptr(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_NO_MEMORY; + return NULL; +} + +int ben_need_bytes(const struct ben_decode_ctx *ctx, size_t n) +{ + return ((ctx->off + n) <= ctx->len) ? 0 : -1; +} + +char ben_current_char(const struct ben_decode_ctx *ctx) +{ + return ctx->data[ctx->off]; +} + +const char *ben_current_buf(const struct ben_decode_ctx *ctx, size_t n) +{ + return ben_need_bytes(ctx, n) ? NULL : ctx->data + ctx->off; +} + +void ben_skip(struct ben_decode_ctx *ctx, size_t n) +{ + ctx->off += n; +} + +static struct bencode *internal_blob(void *data, size_t len) +{ + struct bencode_str *b = alloc(BENCODE_STR); + if (b == NULL) + return NULL; + b->s = data; + b->len = len; + assert(b->s[len] == 0); + return (struct bencode *) b; +} + +static void skip_to_next_line(struct ben_decode_ctx *ctx) +{ + for (; ctx->off < ctx->len; ctx->off++) { + if (ben_current_char(ctx) == '\n') { + ctx->line++; + ctx->off++; + break; + } + } +} + +static int seek_char(struct ben_decode_ctx *ctx) +{ + while (ctx->off < ctx->len) { + char c = ben_current_char(ctx); + if (isspace(c)) { + if (c == '\n') + ctx->line++; + ctx->off++; + } else if (c == '#') { + /* Skip comment */ + ctx->off++; + skip_to_next_line(ctx); + } else { + return 0; + } + } + return insufficient(ctx); +} + +/* + * Test if string 's' is located at current position. + * Increment current position and return 0 if the string matches. + * Returns -1 otherwise. The function avoids buffer overflow. 
+ */ +static int try_match(struct ben_decode_ctx *ctx, const char *s) +{ + size_t n = strlen(s); + if (ben_need_bytes(ctx, n)) + return -1; + if (memcmp(ctx->data + ctx->off, s, n) != 0) + return -1; + ctx->off += n; + return 0; +} + +static int try_match_with_errors(struct ben_decode_ctx *ctx, const char *s) +{ + size_t n = strlen(s); + size_t left = ctx->len - ctx->off; + + assert(ctx->off <= ctx->len); + + if (left == 0) + return insufficient(ctx); + + if (left < n) { + if (memcmp(ctx->data + ctx->off, s, left) != 0) + return invalid(ctx); + return insufficient(ctx); + } + + if (memcmp(ctx->data + ctx->off, s, n) != 0) + return invalid(ctx); + + ctx->off += n; + return 0; +} + +int ben_allocate(struct bencode *b, size_t n) +{ + switch (b->type) { + case BENCODE_DICT: + return resize_dict(ben_dict_cast(b), n); + case BENCODE_LIST: + return resize_list(ben_list_cast(b), n); + default: + die("ben_allocate(): Unknown type %d\n", b->type); + } +} + +static struct bencode *clone_dict(const struct bencode_dict *d) +{ + struct bencode *key; + struct bencode *value; + struct bencode *newkey; + struct bencode *newvalue; + size_t pos; + struct bencode *newdict = ben_dict(); + if (newdict == NULL) + return NULL; + ben_dict_for_each(key, value, pos, (const struct bencode *) d) { + newkey = ben_clone(key); + newvalue = ben_clone(value); + if (newkey == NULL || newvalue == NULL) { + ben_free(newkey); + ben_free(newvalue); + goto error; + } + if (ben_dict_set(newdict, newkey, newvalue)) { + ben_free(newkey); + ben_free(newvalue); + goto error; + } + newkey = NULL; + newvalue = NULL; + } + return newdict; + +error: + ben_free(newdict); + return NULL; +} + +static struct bencode *clone_list(const struct bencode_list *list) +{ + struct bencode *value; + struct bencode *newvalue; + size_t pos; + struct bencode *newlist = ben_list(); + if (newlist == NULL) + return NULL; + ben_list_for_each(value, pos, (const struct bencode *) list) { + newvalue = ben_clone(value); + if (newvalue == NULL) + goto error; + if (ben_list_append(newlist, newvalue)) { + ben_free(newvalue); + goto error; + } + newvalue = NULL; + } + return newlist; + +error: + ben_free(newlist); + return NULL; +} + +static struct bencode *clone_str(const struct bencode_str *s) +{ + return ben_blob(s->s, s->len); +} + +static struct bencode *share_dict(const struct bencode_dict *d) +{ + struct bencode *newdict = ben_dict(); + if (newdict == NULL) + return NULL; + memcpy(newdict, d, sizeof(*d)); + ((struct bencode_dict *) newdict)->shared = 1; + return newdict; +} + +static struct bencode *share_list(const struct bencode_list *list) +{ + struct bencode *newlist = ben_list(); + if (newlist == NULL) + return NULL; + memcpy(newlist, list, sizeof(*list)); + ((struct bencode_list *) newlist)->shared = 1; + return newlist; +} + +struct bencode *ben_clone(const struct bencode *b) +{ + switch (b->type) { + case BENCODE_BOOL: + return ben_bool(ben_bool_const_cast(b)->b); + case BENCODE_DICT: + return clone_dict(ben_dict_const_cast(b)); + case BENCODE_INT: + return ben_int(ben_int_const_cast(b)->ll); + case BENCODE_LIST: + return clone_list(ben_list_const_cast(b)); + case BENCODE_STR: + return clone_str(ben_str_const_cast(b)); + default: + die("Invalid type %c\n", b->type); + } +} + +struct bencode *ben_shared_clone(const struct bencode *b) +{ + switch (b->type) { + case BENCODE_DICT: + return share_dict(ben_dict_const_cast(b)); + break; + case BENCODE_LIST: + return share_list(ben_list_const_cast(b)); + break; + default: + return ben_clone(b); + } +} + 
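/*
 * Editorial sketch, not part of the patch: the dht.c hunks earlier in this
 * diff drive exactly this API (ben_dict()/ben_blob()/ben_dict_set() to build
 * a message, ben_encode2() to serialize it, ben_free() to release the whole
 * tree).  The helper below is illustrative only; the "ping" payload, buffer
 * size and transaction id are made up for the example, and error checking on
 * ben_dict_set() is omitted for brevity.
 */
static void example_build_ping(void)
{
	char buf[512];
	size_t n;
	struct bencode *msg = ben_dict();
	struct bencode *args = ben_dict();

	ben_dict_set(args, ben_blob("id", 2), ben_blob("abcdefghij0123456789", 20));
	ben_dict_set(msg, ben_blob("t", 1), ben_blob("aa", 2));
	ben_dict_set(msg, ben_blob("y", 1), ben_blob("q", 1));
	ben_dict_set(msg, ben_blob("q", 1), ben_blob("ping", 4));
	ben_dict_set(msg, ben_blob("a", 1), args);	/* msg now owns args */

	/* keys are emitted in sorted order:
	   "d1:ad2:id20:abcdefghij0123456789e1:q4:ping1:t2:aa1:y1:qe" */
	n = ben_encode2(buf, sizeof(buf), msg);
	ben_free(msg);					/* frees args and every blob too */

	if (n != (size_t) -1) {
		struct bencode *copy = ben_decode(buf, n);	/* round-trip check */
		ben_free(copy);
	}
}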
+static int cmp_dict(const struct bencode *a, const struct bencode *b) +{ + size_t len = ben_dict_len(a); + size_t pos; + struct bencode *key; + struct bencode *va; + struct bencode *vb; + int ret = 0; + struct bencode_keyvalue *pairs; + + if (len != ben_dict_len(b)) { + /* Returning any non-zero value is allowed */ + return (len < ben_dict_len(b)) ? -1 : 1; + } + + pairs = ben_dict_ordered_items(a); + for (pos = 0; pos < len; pos++) { + key = pairs[pos].key; + va = pairs[pos].value; + vb = ben_dict_get(b, key); + if (vb == NULL) { + /* Returning any non-zero value is allowed */ + ret = (a < b) ? -1 : 1; + break; + } + ret = ben_cmp(va, vb); + if (ret) + break; + } + + free(pairs); + return ret; +} + +static int cmp_list(const struct bencode *a, const struct bencode *b) +{ + const struct bencode_list *la; + const struct bencode_list *lb; + struct bencode *va; + struct bencode *vb; + size_t cmplen; + size_t i; + int ret; + + la = ben_list_const_cast(a); + lb = ben_list_const_cast(b); + cmplen = (la->n <= lb->n) ? la->n : lb->n; + + for (i = 0; i < cmplen; ++i) { + va = ben_list_get(a, i); + vb = ben_list_get(b, i); + ret = ben_cmp(va, vb); + if (ret) + return ret; + } + if (la->n != lb->n) + return (la->n < lb->n) ? -1 : 1; + return 0; +} + +int ben_cmp(const struct bencode *a, const struct bencode *b) +{ + size_t cmplen; + int ret; + const struct bencode_int *ia; + const struct bencode_int *ib; + const struct bencode_str *sa; + const struct bencode_str *sb; + const struct bencode_user *ua; + const struct bencode_user *ub; + + if (a->type != b->type) + return (a->type == BENCODE_INT) ? -1 : 1; + + switch (a->type) { + case BENCODE_INT: + ia = ben_int_const_cast(a); + ib = ben_int_const_cast(b); + if (ia->ll < ib->ll) + return -1; + if (ib->ll < ia->ll) + return 1; + return 0; + case BENCODE_STR: + sa = ben_str_const_cast(a); + sb = ben_str_const_cast(b); + cmplen = (sa->len <= sb->len) ? sa->len : sb->len; + ret = memcmp(sa->s, sb->s, cmplen); + if (ret) + return ret < 0 ? -1 : 1; + if (sa->len != sb->len) + return (sa->len < sb->len) ? -1 : 1; + return 0; + case BENCODE_DICT: + return cmp_dict(a, b); + case BENCODE_LIST: + return cmp_list(a, b); + case BENCODE_USER: + ua = ben_user_const_cast(a); + ub = ben_user_const_cast(b); + if (ua->info != ub->info) + return (a < b) ? 
-1 : 1; + return ua->info->cmp(a, b); + default: + die("Invalid type %c\n", b->type); + } +} + +int ben_cmp_with_str(const struct bencode *a, const char *s) +{ + struct bencode_str b; + inplace_ben_str(&b, s, strlen(s)); + return ben_cmp(a, (struct bencode *) &b); +} + +int ben_cmp_qsort(const void *a, const void *b) +{ + const struct bencode *akey = ((const struct bencode_keyvalue *) a)->key; + const struct bencode *bkey = ((const struct bencode_keyvalue *) b)->key; + return ben_cmp(akey, bkey); +} + +static struct bencode *decode_bool(struct ben_decode_ctx *ctx) +{ + struct bencode_bool *b; + char value; + char c; + if (ben_need_bytes(ctx, 2)) + return ben_insufficient_ptr(ctx); + ctx->off++; + + c = ben_current_char(ctx); + if (c != '0' && c != '1') + return ben_invalid_ptr(ctx); + + value = (c == '1'); + b = alloc(BENCODE_BOOL); + if (b == NULL) + return ben_oom_ptr(ctx); + + b->b = value; + ctx->off++; + return (struct bencode *) b; +} + +static size_t hash_bucket(long long hash, const struct bencode_dict *d) +{ + return hash & (d->alloc - 1); +} + +static size_t hash_bucket_head(long long hash, const struct bencode_dict *d) +{ + if (d->buckets == NULL) + return -1; + return d->buckets[hash_bucket(hash, d)]; +} + +static int resize_dict(struct bencode_dict *d, size_t newalloc) +{ + size_t *newbuckets; + struct bencode_dict_node *newnodes;; + size_t pos; + + if (newalloc == -1) { + if (d->alloc >= DICT_MAX_ALLOC) + return -1; + + if (d->alloc == 0) + newalloc = 4; + else + newalloc = d->alloc * 2; + } else { + size_t x; + if (newalloc < d->n || newalloc > DICT_MAX_ALLOC) + return -1; + /* Round to next power of two */ + x = 1; + while (x < newalloc) + x <<= 1; + assert(x >= newalloc); + newalloc = x; + if (newalloc > DICT_MAX_ALLOC) + return -1; + } + + /* size must be a power of two */ + assert((newalloc & (newalloc - 1)) == 0); + + newbuckets = realloc(d->buckets, sizeof(newbuckets[0]) * newalloc); + newnodes = realloc(d->nodes, sizeof(newnodes[0]) * newalloc); + if (newnodes == NULL || newbuckets == NULL) { + free(newnodes); + free(newbuckets); + return -1; + } + + d->alloc = newalloc; + d->buckets = newbuckets; + d->nodes = newnodes; + + /* Clear all buckets */ + memset(d->buckets, -1, d->alloc * sizeof(d->buckets[0])); + + /* Reinsert nodes into buckets */ + for (pos = 0; pos < d->n; pos++) { + struct bencode_dict_node *node = &d->nodes[pos]; + size_t bucket = hash_bucket(node->hash, d); + node->next = d->buckets[bucket]; + d->buckets[bucket] = pos; + } + + return 0; +} + +/* The string/binary object hash is copied from Python */ +static long long str_hash(const unsigned char *s, size_t len) +{ + long long hash; + size_t i; + if (len == 0) + return 0; + hash = s[0] << 7; + for (i = 0; i < len; i++) + hash = (1000003 * hash) ^ s[i]; + hash ^= len; + if (hash == -1) + hash = -2; + return hash; +} + +long long ben_str_hash(const struct bencode *b) +{ + const struct bencode_str *bstr = ben_str_const_cast(b); + const unsigned char *s = (unsigned char *) bstr->s; + return str_hash(s, bstr->len); +} + +long long ben_int_hash(const struct bencode *b) +{ + long long x = ben_int_const_cast(b)->ll; + return (x == -1) ? 
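/* keep -1 out of the hash range, mirroring the Python-derived str_hash() above */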
-2 : x; +} + +long long ben_hash(const struct bencode *b) +{ + switch (b->type) { + case BENCODE_INT: + return ben_int_hash(b); + case BENCODE_STR: + return ben_str_hash(b); + default: + die("hash: Invalid type: %d\n", b->type); + } +} + +static struct bencode *decode_dict(struct ben_decode_ctx *ctx) +{ + struct bencode *key; + struct bencode *lastkey = NULL; + struct bencode *value; + struct bencode_dict *d; + + d = alloc(BENCODE_DICT); + if (d == NULL) { + warn("Not enough memory for dict\n"); + return ben_oom_ptr(ctx); + } + + ctx->off++; + + while (ctx->off < ctx->len && ben_current_char(ctx) != 'e') { + key = ben_ctx_decode(ctx); + if (key == NULL) + goto error; + if (key->type != BENCODE_INT && key->type != BENCODE_STR) { + ben_free(key); + key = NULL; + ctx->error = BEN_INVALID; + warn("Invalid dict key type\n"); + goto error; + } + + if (lastkey != NULL && ben_cmp(lastkey, key) >= 0) { + ben_free(key); + key = NULL; + ctx->error = BEN_INVALID; + goto error; + } + + value = ben_ctx_decode(ctx); + if (value == NULL) { + ben_free(key); + key = NULL; + goto error; + } + + if (ben_dict_set((struct bencode *) d, key, value)) { + ben_free(key); + ben_free(value); + key = NULL; + value = NULL; + ctx->error = BEN_NO_MEMORY; + goto error; + } + + lastkey = key; + } + if (ctx->off >= ctx->len) { + ctx->error = BEN_INSUFFICIENT; + goto error; + } + + ctx->off++; + + return (struct bencode *) d; + +error: + ben_free((struct bencode *) d); + return NULL; +} + +static size_t find(const struct ben_decode_ctx *ctx, char c) +{ + char *match = memchr(ctx->data + ctx->off, c, ctx->len - ctx->off); + if (match == NULL) + return -1; + return (size_t) (match - ctx->data); +} + +/* off is the position of first number in */ +static int read_long_long(long long *ll, struct ben_decode_ctx *ctx, int c) +{ + char buf[LONGLONGSIZE]; /* fits all 64 bit integers */ + char *endptr; + size_t slen; + size_t pos = find(ctx, c); + + if (pos == -1) + return insufficient(ctx); + + slen = pos - ctx->off; + if (slen == 0 || slen >= sizeof buf) + return invalid(ctx); + + assert(slen < sizeof buf); + memcpy(buf, ctx->data + ctx->off, slen); + buf[slen] = 0; + + if (buf[0] != '-' && !isdigit(buf[0])) + return invalid(ctx); + + errno = 0; + *ll = strtoll(buf, &endptr, 10); + if (errno == ERANGE || *endptr != 0) + return invalid(ctx); + + /* + * Demand a unique encoding for all integers. + * Zero may not begin with a (minus) sign. + * Non-zero integers may not have leading zeros in the encoding. 
+ */ + if (buf[0] == '-' && buf[1] == '0') + return invalid(ctx); + if (buf[0] == '0' && pos != (ctx->off + 1)) + return invalid(ctx); + + ctx->off = pos + 1; + return 0; +} + +static struct bencode *decode_int(struct ben_decode_ctx *ctx) +{ + struct bencode_int *b; + long long ll; + ctx->off++; + if (read_long_long(&ll, ctx, 'e')) + return NULL; + b = alloc(BENCODE_INT); + if (b == NULL) + return ben_oom_ptr(ctx); + b->ll = ll; + return (struct bencode *) b; +} + +static int resize_list(struct bencode_list *list, size_t newalloc) +{ + struct bencode **newvalues; + size_t newsize; + + if (newalloc == -1) { + if (list->alloc >= MAX_ALLOC) + return -1; + if (list->alloc == 0) + newalloc = 4; + else + newalloc = list->alloc * 2; + } else { + if (newalloc < list->n || newalloc > MAX_ALLOC) + return -1; + } + + newsize = sizeof(list->values[0]) * newalloc; + newvalues = realloc(list->values, newsize); + if (newvalues == NULL) + return -1; + list->alloc = newalloc; + list->values = newvalues; + return 0; +} + +static struct bencode *decode_list(struct ben_decode_ctx *ctx) +{ + struct bencode_list *l = alloc(BENCODE_LIST); + if (l == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (ctx->off < ctx->len && ben_current_char(ctx) != 'e') { + struct bencode *b = ben_ctx_decode(ctx); + if (b == NULL) + goto error; + if (ben_list_append((struct bencode *) l, b)) { + ben_free(b); + ctx->error = BEN_NO_MEMORY; + goto error; + } + } + + if (ctx->off >= ctx->len) { + ctx->error = BEN_INSUFFICIENT; + goto error; + } + + ctx->off++; + return (struct bencode *) l; + +error: + ben_free((struct bencode *) l); + return NULL; +} + +static size_t read_size_t(struct ben_decode_ctx *ctx, int c) +{ + long long ll; + size_t s; + if (read_long_long(&ll, ctx, c)) + return -1; + if (ll < 0) + return invalid(ctx); + /* + * Test that information is not lost when converting from long long + * to size_t + */ + s = (size_t) ll; + if (ll != (long long) s) + return invalid(ctx); + return s; +} + +static struct bencode *decode_str(struct ben_decode_ctx *ctx) +{ + struct bencode *b; + size_t datalen = read_size_t(ctx, ':'); /* Read the string length */ + if (datalen == -1) + return NULL; + + if (ben_need_bytes(ctx, datalen)) + return ben_insufficient_ptr(ctx); + + /* Allocate string structure and copy data into it */ + b = ben_blob(ctx->data + ctx->off, datalen); + ctx->off += datalen; + return b; +} + +struct bencode *ben_ctx_decode(struct ben_decode_ctx *ctx) +{ + char c; + struct bencode_type *type; + struct bencode *b; + ctx->level++; + if (ctx->level > 256) + return ben_invalid_ptr(ctx); + + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + + assert (ctx->off < ctx->len); + c = ben_current_char(ctx); + switch (c) { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + b = decode_str(ctx); + break; + case 'b': + b = decode_bool(ctx); + break; + case 'd': + b = decode_dict(ctx); + break; + case 'i': + b = decode_int(ctx); + break; + case 'l': + b = decode_list(ctx); + break; + default: + if (ctx->types && (unsigned char) c < 128) { + type = ctx->types[(unsigned char) c]; + if (type) { + ctx->off++; + b = type->decode(ctx); + } else + return ben_invalid_ptr(ctx); + } else + return ben_invalid_ptr(ctx); + } + ctx->level--; + return b; +} + +struct bencode *ben_decode(const void *data, size_t len) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len}; + struct bencode *b = ben_ctx_decode(&ctx); + if (b != NULL && ctx.off != len) { + 
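/* whole-buffer decode: trailing bytes after a complete value make the call
   fail; ben_decode2() below instead reports the final offset so a caller can
   resume at the next message */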
ben_free(b); + return NULL; + } + return b; +} + +struct bencode *ben_decode2(const void *data, size_t len, size_t *off, int *error) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len, .off = *off}; + struct bencode *b = ben_ctx_decode(&ctx); + *off = ctx.off; + if (error != NULL) { + assert((b != NULL) ^ (ctx.error != 0)); + *error = ctx.error; + } + return b; +} + +struct bencode *ben_decode3(const void *data, size_t len, size_t *off, int *error, struct bencode_type *types[128]) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len, .off = *off, + .types = types}; + struct bencode *b = ben_ctx_decode(&ctx); + *off = ctx.off; + if (error != NULL) { + assert((b != NULL) ^ (ctx.error != 0)); + *error = ctx.error; + } + return b; +} + +static struct bencode *decode_printed_bool(struct ben_decode_ctx *ctx) +{ + struct bencode *b; + int bval = -1; + + if (try_match(ctx, "True")) { + if (ben_need_bytes(ctx, 4)) + return ben_insufficient_ptr(ctx); + } else { + bval = 1; + } + + if (bval < 0) { + /* It's not 'True', so it can only be 'False'. Verify it. */ + if (try_match_with_errors(ctx, "False")) + return NULL; + bval = 0; + } + + assert(bval == 0 || bval == 1); + b = ben_bool(bval); + if (b == NULL) + return ben_oom_ptr(ctx); + return b; +} + +static struct bencode *decode_printed_dict(struct ben_decode_ctx *ctx) +{ + struct bencode *d = ben_dict(); + struct bencode *key = NULL; + struct bencode *value = NULL; + + if (d == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == '}') { + ctx->off++; + break; + } + + key = decode_printed(ctx); + if (key == NULL) + goto nullpath; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) != ':') + goto invalidpath; + ctx->off++; + + value = decode_printed(ctx); + if (value == NULL) + goto nullpath; + + if (ben_dict_set(d, key, value)) { + ben_free(key); + ben_free(value); + ben_free(d); + return ben_oom_ptr(ctx); + } + key = NULL; + value = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != '}') + goto invalidpath; + } + return d; + +invalidpath: + ben_free(key); + ben_free(value); + ben_free(d); + return ben_invalid_ptr(ctx); + +nullpath: + ben_free(key); + ben_free(value); + ben_free(d); + return NULL; +} + +static struct bencode *decode_printed_int(struct ben_decode_ctx *ctx) +{ + long long ll; + char buf[LONGLONGSIZE]; + char *end; + size_t pos = 0; + struct bencode *b; + int gotzero = 0; + int base = 10; + int neg = 0; + + if (ben_current_char(ctx) == '-') { + neg = 1; + ctx->off++; + } + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + + if (ben_current_char(ctx) == '0') { + buf[pos] = '0'; + pos++; + ctx->off++; + gotzero = 1; + } + + if (gotzero) { + if (ctx->off == ctx->len) { + ll = 0; + goto returnwithval; + } + if (ben_current_char(ctx) == 'x') { + pos = 0; + base = 16; + ctx->off++; + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + } else if (isdigit(ben_current_char(ctx))) { + base = 8; + } + } else { + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + } + + while (ctx->off < ctx->len && pos < sizeof buf) { + char c = ben_current_char(ctx); + if (base == 16) { + if (!isxdigit(c)) + break; + } else { + if (!isdigit(c)) + break; + } + buf[pos] = c; + pos++; + ctx->off++; + } + if (pos == 0 || pos == sizeof buf) + return ben_invalid_ptr(ctx); + buf[pos] = 0; + ll = strtoll(buf, &end, base); + if (*end != 0) + 
return ben_invalid_ptr(ctx); + +returnwithval: + if (neg) + ll = -ll; + b = ben_int(ll); + if (b == NULL) + return ben_oom_ptr(ctx); + return b; +} + +static struct bencode *decode_printed_list(struct ben_decode_ctx *ctx) +{ + struct bencode *l = ben_list(); + struct bencode *b = NULL; + + if (l == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ']') { + ctx->off++; + break; + } + b = decode_printed(ctx); + if (b == NULL) + goto nullpath; + if (ben_list_append(l, b)) { + ben_free(b); + ben_free(l); + return ben_oom_ptr(ctx); + } + b = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != ']') { + ben_free(l); + return ben_invalid_ptr(ctx); + } + } + return l; + +nullpath: + ben_free(b); + ben_free(l); + return NULL; +} + +static struct bencode *decode_printed_str(struct ben_decode_ctx *ctx) +{ + size_t pos; + char *s = NULL; + size_t len = 0; + char initial = ben_current_char(ctx); + struct bencode *b; + + ctx->off++; + pos = ctx->off; + while (pos < ctx->len) { + char c = ctx->data[pos]; + if (!isprint(c)) + return ben_invalid_ptr(ctx); + if (c == initial) + break; + len++; + pos++; + if (c != '\\') + continue; /* Normal printable char, e.g. 'a' */ + /* Handle '\\' */ + if (pos == ctx->len) + return ben_insufficient_ptr(ctx); + + c = ctx->data[pos]; + pos++; + if (c == 'x') { + /* hexadecimal value: \xHH */ + pos += 2; + } + } + if (pos >= ctx->len) + return ben_insufficient_ptr(ctx); + + s = malloc(len + 1); + if (s == NULL) + return ben_oom_ptr(ctx); + + pos = 0; + while (ctx->off < ctx->len) { + char c = ben_current_char(ctx); + assert(isprint(c)); + if (c == initial) + break; + assert(pos < len); + ctx->off++; + if (c != '\\') { + s[pos] = c; + pos++; + continue; /* Normal printable char, e.g. 'a' */ + } + /* Handle '\\' */ + + /* + * Note, we do assert because we have already verified in the + * previous loop that there is sufficient data. + */ + assert(ctx->off != ctx->len); + c = ben_current_char(ctx); + ctx->off++; + if (c == 'x') { + /* hexadecimal value: \xHH */ + char *end; + unsigned long x; + char buf[3]; + assert((ctx->off + 1) < ctx->len); + buf[0] = ctx->data[ctx->off + 0]; + buf[1] = ctx->data[ctx->off + 1]; + buf[2] = 0; + ctx->off += 2; + x = strtoul(buf, &end, 16); + if (*end != 0) + goto invalid; + assert(x < 256); + c = (char) x; + } + s[pos] = c; + pos++; + } + assert(pos == len); + if (ctx->off >= ctx->len) + return ben_insufficient_ptr(ctx); + ctx->off++; + + s[pos] = 0; /* the area must always be zero terminated! 
*/ + + b = internal_blob(s, len); + if (b == NULL) { + free(s); + return ben_oom_ptr(ctx); + } + return b; + +invalid: + free(s); + return ben_invalid_ptr(ctx); +} + +static struct bencode *decode_printed(struct ben_decode_ctx *ctx) +{ + struct bencode *b; + + ctx->level++; + if (ctx->level > 256) + return ben_invalid_ptr(ctx); + + if (seek_char(ctx)) + return NULL; + + switch (ben_current_char(ctx)) { + case '\'': + case '"': + b = decode_printed_str(ctx); + break; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + b = decode_printed_int(ctx); + break; + case 'F': + case 'T': + b = decode_printed_bool(ctx); + break; + case '[': + b = decode_printed_list(ctx); + break; + case '{': + b = decode_printed_dict(ctx); + break; + default: + return ben_invalid_ptr(ctx); + } + ctx->level--; + return b; +} + +struct bencode *ben_decode_printed(const void *data, size_t len) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len}; + return decode_printed(&ctx); +} + +struct bencode *ben_decode_printed2(const void *data, size_t len, size_t *off, struct bencode_error *error) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len, .off = *off}; + struct bencode *b = decode_printed(&ctx); + *off = ctx.off; + if (error != NULL) { + assert((b != NULL) ^ (ctx.error != 0)); + error->error = ctx.error; + if (b != NULL) { + error->off = 0; + error->line = 0; + } else { + error->off = ctx.off; + error->line = ctx.line; + } + } + return b; +} + +static void free_dict(struct bencode_dict *d) +{ + size_t pos; + if (d->shared) + return; + for (pos = 0; pos < d->n; pos++) { + ben_free(d->nodes[pos].key); + ben_free(d->nodes[pos].value); + d->nodes[pos].key = NULL; + d->nodes[pos].value = NULL; + } + free(d->buckets); + free(d->nodes); +} + +static void free_list(struct bencode_list *list) +{ + size_t pos; + if (list->shared) + return; + for (pos = 0; pos < list->n; pos++) { + ben_free(list->values[pos]); + list->values[pos] = NULL; + } + free(list->values); +} + +int ben_put_char(struct ben_encode_ctx *ctx, char c) +{ + if (ctx->pos >= ctx->size) + return -1; + ctx->data[ctx->pos] = c; + ctx->pos++; + return 0; +} + +int ben_put_buffer(struct ben_encode_ctx *ctx, const void *buf, size_t len) +{ + if ((ctx->pos + len) > ctx->size) + return -1; + memcpy(ctx->data + ctx->pos, buf, len); + ctx->pos += len; + return 0; +} + +static int puthexchar(struct ben_encode_ctx *ctx, unsigned char hex) +{ + char buf[5]; + int len = snprintf(buf, sizeof buf, "\\x%.2x", hex); + assert(len == 4); + return ben_put_buffer(ctx, buf, len); +} + +static int putlonglong(struct ben_encode_ctx *ctx, long long ll) +{ + char buf[LONGLONGSIZE]; + int len = snprintf(buf, sizeof buf, "%lld", ll); + assert(len > 0); + return ben_put_buffer(ctx, buf, len); +} + +static int putunsignedlonglong(struct ben_encode_ctx *ctx, unsigned long long llu) +{ + char buf[LONGLONGSIZE]; + int len = snprintf(buf, sizeof buf, "%llu", llu); + assert(len > 0); + return ben_put_buffer(ctx, buf, len); +} + +static int putstr(struct ben_encode_ctx *ctx, char *s) +{ + return ben_put_buffer(ctx, s, strlen(s)); +} + +static int print(struct ben_encode_ctx *ctx, const struct bencode *b) +{ + const struct bencode_bool *boolean; + const struct bencode_int *integer; + const struct bencode_list *list; + const struct bencode_str *s; + size_t i; + size_t len; + struct bencode_keyvalue *pairs; + + switch (b->type) { + case BENCODE_BOOL: + boolean = ben_bool_const_cast(b); + return putstr(ctx, 
boolean->b ? "True" : "False"); + + case BENCODE_DICT: + if (ben_put_char(ctx, '{')) + return -1; + + pairs = ben_dict_ordered_items(b); + if (pairs == NULL) { + warn("No memory for dict serialization\n"); + return -1; + } + + len = ben_dict_len(b); + for (i = 0; i < len; i++) { + if (print(ctx, pairs[i].key)) + break; + if (putstr(ctx, ": ")) + break; + if (print(ctx, pairs[i].value)) + break; + if (i < (len - 1)) { + if (putstr(ctx, ", ")) + break; + } + } + free(pairs); + pairs = NULL; + if (i < len) + return -1; + + return ben_put_char(ctx, '}'); + + case BENCODE_INT: + integer = ben_int_const_cast(b); + + if (putlonglong(ctx, integer->ll)) + return -1; + + return 0; + + case BENCODE_LIST: + if (ben_put_char(ctx, '[')) + return -1; + list = ben_list_const_cast(b); + for (i = 0; i < list->n; i++) { + if (print(ctx, list->values[i])) + return -1; + if (i < (list->n - 1) && putstr(ctx, ", ")) + return -1; + } + return ben_put_char(ctx, ']'); + + case BENCODE_STR: + s = ben_str_const_cast(b); + if (ben_put_char(ctx, '\'')) + return -1; + for (i = 0; i < s->len; i++) { + if (!isprint(s->s[i])) { + if (puthexchar(ctx, s->s[i])) + return -1; + continue; + } + + switch (s->s[i]) { + case '\'': + case '\\': + /* Need escape character */ + if (ben_put_char(ctx, '\\')) + return -1; + default: + if (ben_put_char(ctx, s->s[i])) + return -1; + break; + } + } + return ben_put_char(ctx, '\''); + default: + die("serialization type %d not implemented\n", b->type); + } +} + +static size_t get_printed_size(const struct bencode *b) +{ + size_t pos; + const struct bencode_bool *boolean; + const struct bencode_dict *d; + const struct bencode_int *i; + const struct bencode_list *l; + const struct bencode_str *s; + size_t size = 0; + char buf[1]; + + switch (b->type) { + case BENCODE_BOOL: + boolean = ben_bool_const_cast(b); + return boolean->b ? 4 : 5; /* "True" and "False" */ + case BENCODE_DICT: + size++; /* "{" */ + d = ben_dict_const_cast(b); + for (pos = 0; pos < d->n; pos++) { + size += get_printed_size(d->nodes[pos].key); + size += 2; /* ": " */ + size += get_printed_size(d->nodes[pos].value); + if (pos < (d->n - 1)) + size += 2; /* ", " */ + } + size++; /* "}" */ + return size; + case BENCODE_INT: + i = ben_int_const_cast(b); + return snprintf(buf, 0, "%lld", i->ll); + case BENCODE_LIST: + size++; /* "[" */ + l = ben_list_const_cast(b); + for (pos = 0; pos < l->n; pos++) { + size += get_printed_size(l->values[pos]); + if (pos < (l->n - 1)) + size += 2; /* ", " */ + } + size++; /* "]" */ + return size; + case BENCODE_STR: + s = ben_str_const_cast(b); + size++; /* ' */ + for (pos = 0; pos < s->len; pos++) { + if (!isprint(s->s[pos])) { + size += 4; /* "\xDD" */ + continue; + } + switch (s->s[pos]) { + case '\'': + case '\\': + size += 2; /* escaped characters */ + break; + default: + size++; + break; + } + } + size++; /* ' */ + return size; + default: + die("Unknown type: %c\n", b->type); + } +} + +int ben_ctx_encode(struct ben_encode_ctx *ctx, const struct bencode *b) +{ + const struct bencode_bool *boolean; + const struct bencode_int *integer; + const struct bencode_list *list; + const struct bencode_str *s; + const struct bencode_user *u; + size_t i; + size_t len; + struct bencode_keyvalue *pairs; + + switch (b->type) { + case BENCODE_BOOL: + boolean = ben_bool_const_cast(b); + return putstr(ctx, boolean->b ? 
"b1" : "b0"); + + case BENCODE_DICT: + if (ben_put_char(ctx, 'd')) + return -1; + + pairs = ben_dict_ordered_items(b); + if (pairs == NULL) { + warn("No memory for dict serialization\n"); + return -1; + } + + len = ben_dict_len(b); + for (i = 0; i < len; i++) { + if (ben_ctx_encode(ctx, pairs[i].key)) + break; + if (ben_ctx_encode(ctx, pairs[i].value)) + break; + } + free(pairs); + pairs = NULL; + if (i < len) + return -1; + + return ben_put_char(ctx, 'e'); + + case BENCODE_INT: + if (ben_put_char(ctx, 'i')) + return -1; + integer = ben_int_const_cast(b); + if (putlonglong(ctx, integer->ll)) + return -1; + return ben_put_char(ctx, 'e'); + + case BENCODE_LIST: + if (ben_put_char(ctx, 'l')) + return -1; + + list = ben_list_const_cast(b); + for (i = 0; i < list->n; i++) { + if (ben_ctx_encode(ctx, list->values[i])) + return -1; + } + + return ben_put_char(ctx, 'e'); + + case BENCODE_STR: + s = ben_str_const_cast(b); + if (putunsignedlonglong(ctx, ((long long) s->len))) + return -1; + if (ben_put_char(ctx, ':')) + return -1; + return ben_put_buffer(ctx, s->s, s->len); + + case BENCODE_USER: + u = ben_user_const_cast(b); + return u->info->encode(ctx, b); + + default: + die("serialization type %d not implemented\n", b->type); + } +} + +static size_t get_size(const struct bencode *b) +{ + size_t pos; + const struct bencode_dict *d; + const struct bencode_int *i; + const struct bencode_list *l; + const struct bencode_str *s; + const struct bencode_user *u; + size_t size = 0; + char buf[1]; + + switch (b->type) { + case BENCODE_BOOL: + return 2; + case BENCODE_DICT: + d = ben_dict_const_cast(b); + for (pos = 0; pos < d->n; pos++) { + size += get_size(d->nodes[pos].key); + size += get_size(d->nodes[pos].value); + } + return size + 2; + case BENCODE_INT: + i = ben_int_const_cast(b); + return 2 + snprintf(buf, 0, "%lld", i->ll); + case BENCODE_LIST: + l = ben_list_const_cast(b); + for (pos = 0; pos < l->n; pos++) + size += get_size(l->values[pos]); + return size + 2; + case BENCODE_STR: + s = ben_str_const_cast(b); + return snprintf(buf, 0, "%zu", s->len) + 1 + s->len; + case BENCODE_USER: + u = ben_user_const_cast(b); + return u->info->get_size(b); + default: + die("Unknown type: %c\n", b->type); + } +} + +size_t ben_encoded_size(const struct bencode *b) +{ + return get_size(b); +} + +void *ben_encode(size_t *len, const struct bencode *b) +{ + size_t size = get_size(b); + void *data = malloc(size); + struct ben_encode_ctx ctx = {.data = data, .size = size}; + if (data == NULL) { + warn("No memory to encode\n"); + return NULL; + } + if (ben_ctx_encode(&ctx, b)) { + free(ctx.data); + return NULL; + } + assert(ctx.pos == size); + *len = ctx.pos; + return data; +} + +size_t ben_encode2(char *data, size_t maxlen, const struct bencode *b) +{ + struct ben_encode_ctx ctx = {.data = data, .size = maxlen, .pos = 0}; + if (ben_ctx_encode(&ctx, b)) + return -1; + return ctx.pos; +} + +void ben_free(struct bencode *b) +{ + struct bencode_str *s; + struct bencode_user *u; + size_t size; + if (b == NULL) + return; + switch (b->type) { + case BENCODE_BOOL: + break; + case BENCODE_DICT: + free_dict(ben_dict_cast(b)); + break; + case BENCODE_INT: + break; + case BENCODE_LIST: + free_list(ben_list_cast(b)); + break; + case BENCODE_STR: + s = ben_str_cast(b); + free(s->s); + break; + case BENCODE_USER: + u = ben_user_cast(b); + if (u->info->free) + u->info->free(b); + break; + default: + die("invalid type: %d\n", b->type); + } + + if (b->type == BENCODE_USER) + size = ((struct bencode_user *) b)->info->size; + else + 
size = type_size(b->type); + memset(b, -1, size); /* data poison */ + free(b); +} + +struct bencode *ben_blob(const void *data, size_t len) +{ + struct bencode_str *b = alloc(BENCODE_STR); + if (b == NULL) + return NULL; + /* Allocate one extra byte for zero termination for convenient use */ + b->s = malloc(len + 1); + if (b->s == NULL) { + free(b); + return NULL; + } + memcpy(b->s, data, len); + b->len = len; + b->s[len] = 0; + return (struct bencode *) b; +} + +struct bencode *ben_bool(int boolean) +{ + struct bencode_bool *b = alloc(BENCODE_BOOL); + if (b == NULL) + return NULL; + b->b = boolean ? 1 : 0; + return (struct bencode *) b; +} + +struct bencode *ben_dict(void) +{ + return alloc(BENCODE_DICT); +} + +struct bencode *ben_dict_get(const struct bencode *dict, const struct bencode *key) +{ + const struct bencode_dict *d = ben_dict_const_cast(dict); + long long hash = ben_hash(key); + size_t pos = hash_bucket_head(hash, d); + while (pos != -1) { + assert(pos < d->n); + if (d->nodes[pos].hash == hash && + ben_cmp(d->nodes[pos].key, key) == 0) + return d->nodes[pos].value; + pos = d->nodes[pos].next; + } + return NULL; +} + +/* + * Note, we do not re-allocate memory, so one may not call ben_free for these + * instances. These are only used to optimize speed. + */ +static void inplace_ben_str(struct bencode_str *b, const char *s, size_t len) +{ + b->type = BENCODE_STR; + b->len = len; + b->s = (char *) s; +} + +static void inplace_ben_int(struct bencode_int *i, long long ll) +{ + i->type = BENCODE_INT; + i->ll = ll; +} + +struct bencode *ben_dict_get_by_str(const struct bencode *dict, const char *key) +{ + struct bencode_str s; + inplace_ben_str(&s, key, strlen(key)); + return ben_dict_get(dict, (struct bencode *) &s); +} + +struct bencode *ben_dict_get_by_int(const struct bencode *dict, long long key) +{ + struct bencode_int i; + inplace_ben_int(&i, key); + return ben_dict_get(dict, (struct bencode *) &i); +} + +struct bencode_keyvalue *ben_dict_ordered_items(const struct bencode *b) +{ + struct bencode_keyvalue *pairs; + size_t i; + const struct bencode_dict *dict = ben_dict_const_cast(b); + if (dict == NULL) + return NULL; + pairs = malloc(dict->n * sizeof(pairs[0])); + if (pairs == NULL) + return NULL; + for (i = 0; i < dict->n; i++) { + pairs[i].key = dict->nodes[i].key; + pairs[i].value = dict->nodes[i].value; + } + qsort(pairs, dict->n, sizeof(pairs[0]), ben_cmp_qsort); + return pairs; +} + +static size_t dict_find_pos(struct bencode_dict *d, + const struct bencode *key, long long hash) +{ + size_t pos = hash_bucket_head(hash, d); + while (pos != -1) { + assert(pos < d->n); + if (d->nodes[pos].hash == hash && + ben_cmp(d->nodes[pos].key, key) == 0) + break; + pos = d->nodes[pos].next; + } + return pos; +} + +static void dict_unlink(struct bencode_dict *d, size_t bucket, size_t unlinkpos) +{ + size_t pos = d->buckets[bucket]; + size_t next; + size_t nextnext; + + assert(unlinkpos < d->n); + + if (pos == unlinkpos) { + next = d->nodes[unlinkpos].next; + assert(next < d->n || next == -1); + d->buckets[bucket] = next; + return; + } + while (pos != -1) { + assert(pos < d->n); + next = d->nodes[pos].next; + if (next == unlinkpos) { + nextnext = d->nodes[next].next; + assert(nextnext < d->n || nextnext == -1); + d->nodes[pos].next = nextnext; + return; + } + pos = next; + } + die("Key should have been found. 
Can not unlink position %zu.\n", unlinkpos); +} + +/* Remove node from the linked list, if found */ +static struct bencode *dict_pop(struct bencode_dict *d, + const struct bencode *key, long long hash) +{ + struct bencode *value; + size_t removebucket = hash_bucket(hash, d); + size_t tailpos = d->n - 1; + size_t tailhash = d->nodes[tailpos].hash; + size_t tailbucket = hash_bucket(tailhash, d); + size_t removepos; + + removepos = dict_find_pos(d, key, hash); + if (removepos == -1) + return NULL; + key = NULL; /* avoid using the pointer again, it may not be valid */ + + /* + * WARNING: complicated code follows. + * + * First, unlink the node to be removed and the tail node. + * We will actually later swap the positions of removed node and + * tail node inside the d->nodes array. We want to preserve + * d->nodes array in a state where positions from 0 to (d->n - 1) + * are always occupied with a valid node. This is done to make + * dictionary walk fast by simply walking positions 0 to (d->n - 1) + * in a for loop. + */ + dict_unlink(d, removebucket, removepos); + if (removepos != tailpos) + dict_unlink(d, tailbucket, tailpos); + + /* Then read the removed node and free its key */ + value = d->nodes[removepos].value; + ben_free(d->nodes[removepos].key); + + /* Then re-insert the unliked tail node in the place of removed node */ + d->nodes[removepos] = d->nodes[tailpos]; + memset(&d->nodes[tailpos], 0, sizeof d->nodes[tailpos]); /* poison */ + d->nodes[tailpos].next = ((size_t) -1) / 2; + + /* + * Then re-link the tail node to its bucket, unless the tail node + * was the one to be removed. + */ + if (removepos != tailpos) { + d->nodes[removepos].next = d->buckets[tailbucket]; + d->buckets[tailbucket] = removepos; + } + + d->n--; + + if (d->n <= (d->alloc / 4) && d->alloc >= 8) + resize_dict(d, d->alloc / 2); + + return value; +} + +struct bencode *ben_dict_pop(struct bencode *dict, const struct bencode *key) +{ + struct bencode_dict *d = ben_dict_cast(dict); + return dict_pop(d, key, ben_hash(key)); +} + +struct bencode *ben_dict_pop_by_str(struct bencode *dict, const char *key) +{ + struct bencode_str s; + inplace_ben_str(&s, key, strlen(key)); + return ben_dict_pop(dict, (struct bencode *) &s); +} + +struct bencode *ben_dict_pop_by_int(struct bencode *dict, long long key) +{ + struct bencode_int i; + inplace_ben_int(&i, key); + return ben_dict_pop(dict, (struct bencode *) &i); +} + +/* This can be used from the ben_dict_for_each() iterator */ +struct bencode *ben_dict_pop_current(struct bencode *dict, size_t *pos) +{ + struct bencode_dict *d = ben_dict_cast(dict); + struct bencode *value = ben_dict_pop(dict, d->nodes[*pos].key); + (*pos)--; + return value; +} + +int ben_dict_set(struct bencode *dict, struct bencode *key, struct bencode *value) +{ + struct bencode_dict *d = ben_dict_cast(dict); + long long hash = ben_hash(key); + size_t bucket; + size_t pos; + + assert(value != NULL); + + pos = hash_bucket_head(hash, d); + for (; pos != -1; pos = d->nodes[pos].next) { + assert(pos < d->n); + if (d->nodes[pos].hash != hash || ben_cmp(d->nodes[pos].key, key) != 0) + continue; + ben_free(d->nodes[pos].key); + ben_free(d->nodes[pos].value); + d->nodes[pos].key = key; + d->nodes[pos].value = value; + /* 'hash' and 'next' members stay the same */ + return 0; + } + + assert(d->n <= d->alloc); + if (d->n == d->alloc && resize_dict(d, -1)) + return -1; + + bucket = hash_bucket(hash, d); + pos = d->n; + d->nodes[pos] = (struct bencode_dict_node) {.hash = hash, + .key = key, + .value = value, + .next = 
d->buckets[bucket]}; + d->n++; + d->buckets[bucket] = pos; + return 0; +} + +int ben_dict_set_by_str(struct bencode *dict, const char *key, struct bencode *value) +{ + struct bencode *bkey = ben_str(key); + if (bkey == NULL) + return -1; + if (ben_dict_set(dict, bkey, value)) { + ben_free(bkey); + return -1; + } + return 0; +} + +int ben_dict_set_str_by_str(struct bencode *dict, const char *key, const char *value) +{ + struct bencode *bkey = ben_str(key); + struct bencode *bvalue = ben_str(value); + if (bkey == NULL || bvalue == NULL) { + ben_free(bkey); + ben_free(bvalue); + return -1; + } + if (ben_dict_set(dict, bkey, bvalue)) { + ben_free(bkey); + ben_free(bvalue); + return -1; + } + return 0; +} + +struct bencode *ben_int(long long ll) +{ + struct bencode_int *b = alloc(BENCODE_INT); + if (b == NULL) + return NULL; + b->ll = ll; + return (struct bencode *) b; +} + +struct bencode *ben_list(void) +{ + return alloc(BENCODE_LIST); +} + +int ben_list_append(struct bencode *list, struct bencode *b) +{ + struct bencode_list *l = ben_list_cast(list); + /* NULL pointer de-reference if the cast fails */ + assert(l->n <= l->alloc); + if (l->n == l->alloc && resize_list(l, -1)) + return -1; + assert(b != NULL); + l->values[l->n] = b; + l->n++; + return 0; +} + +int ben_list_append_str(struct bencode *list, const char *s) +{ + struct bencode *bs = ben_str(s); + if (bs == NULL) + return -1; + return ben_list_append(list, bs); +} + +int ben_list_append_int(struct bencode *list, long long ll) +{ + struct bencode *bll = ben_int(ll); + if (bll == NULL) + return -1; + return ben_list_append(list, bll); +} + +struct bencode *ben_list_pop(struct bencode *list, size_t pos) +{ + struct bencode_list *l = ben_list_cast(list); + struct bencode *value; + + assert(pos < l->n); + + value = ben_list_get(list, pos); + + for (; (pos + 1) < l->n; pos++) + l->values[pos] = l->values[pos + 1]; + + l->values[l->n - 1] = NULL; + l->n--; + return value; +} + +void ben_list_set(struct bencode *list, size_t i, struct bencode *b) +{ + struct bencode_list *l = ben_list_cast(list); + if (i >= l->n) + die("ben_list_set() out of bounds: %zu\n", i); + + ben_free(l->values[i]); + assert(b != NULL); + l->values[i] = b; +} + +char *ben_print(const struct bencode *b) +{ + size_t size = get_printed_size(b); + char *data = malloc(size + 1); + struct ben_encode_ctx ctx = {.data = data, .size = size, .pos = 0}; + if (data == NULL) { + warn("No memory to print\n"); + return NULL; + } + if (print(&ctx, b)) { + free(data); + return NULL; + } + assert(ctx.pos == size); + data[ctx.pos] = 0; + return data; +} + +struct bencode *ben_str(const char *s) +{ + return ben_blob(s, strlen(s)); +} + +const char *ben_strerror(int error) +{ + switch (error) { + case BEN_OK: + return "OK (no error)"; + case BEN_INVALID: + return "Invalid data"; + case BEN_INSUFFICIENT: + return "Insufficient amount of data (need more data)"; + case BEN_NO_MEMORY: + return "Out of memory"; + case BEN_MISMATCH: + return "A given structure did not match unpack format"; + default: + fprintf(stderr, "Unknown error code: %d\n", error); + return NULL; + } +} + +static int unpack_pointer(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + const char **str; + const struct bencode **ptr; + + ctx->off++; + + if (ctx->off >= ctx->len) + return insufficient(ctx); + + switch (ben_current_char(ctx)) { + case 's': /* %ps */ + ctx->off++; + if (b->type != BENCODE_STR) + return mismatch(ctx); + str = va_arg(*vl, const char **); + *str = ben_str_val(b); + return 0; + + 
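/*
 * Illustrative (assumed) use of the "%ps" specifier handled above:
 *
 *     const char *name;
 *     if (ben_unpack(msg, "{'name': %ps}", &name) == 0)
 *             ... name points at the string stored inside msg, no copy ...
 */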
case 'b': /* %pb */ + ctx->off++; + ptr = va_arg(*vl, const struct bencode **); + *ptr = b; + return 0; + + default: + return invalid(ctx); + } +} + +static int unpack_value(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + long long val; + long long *ll; + long *l; + int *i; + unsigned long long *ull; + unsigned long *ul; + unsigned int *ui; + int longflag = 0; + + ctx->off++; + + while (ctx->off < ctx->len) { + switch (ben_current_char(ctx)) { + case 'l': + ctx->off++; + longflag++; + break; + case 'L': + case 'q': + ctx->off++; + longflag = 2; + break; + + case 'p': + return unpack_pointer(b, ctx, vl); + + /* signed */ + case 'd': + ctx->off++; + if (b->type != BENCODE_INT) + return mismatch(ctx); + val = ben_int_val(b); + switch (longflag) { + case 0: + i = va_arg(*vl, int *); + *i = val; + /* Test that no information was lost in conversion */ + if ((long long) *i != val) + return mismatch(ctx); + break; + case 1: + l = va_arg(*vl, long *); + *l = val; + if ((long long) *l != val) + return mismatch(ctx); + break; + case 2: + ll = va_arg(*vl, long long *); + *ll = val; + break; + } + return 0; + + /* unsigned */ + case 'u': + ctx->off++; + if (b->type != BENCODE_INT) + return mismatch(ctx); + val = ben_int_val(b); + if (val < 0) + return mismatch(ctx); + switch (longflag) { + case 0: + ui = va_arg(*vl, unsigned int *); + *ui = val; + if ((long long) *ui != val) + return mismatch(ctx); + break; + case 1: + ul = va_arg(*vl, unsigned long *); + *ul = val; + if ((long long) *ul != val) + return mismatch(ctx); + break; + case 2: + ull = va_arg(*vl, unsigned long long *); + *ull = val; + break; + } + return 0; + + default: + return invalid(ctx); + } + } + return insufficient(ctx); +} + +static int unpack_dict(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + struct bencode *key = NULL; + const struct bencode *val; + + if (b->type != BENCODE_DICT) + return mismatch(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + return -1; + + if (ben_current_char(ctx) == '}') { + ctx->off++; + break; + } + switch (ben_current_char(ctx)) { + case '\'': + case '"': + key = decode_printed_str(ctx); + break; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + key = decode_printed_int(ctx); + break; + default: + return invalid(ctx); + } + if (key == NULL) + return -1; + val = ben_dict_get(b, key); + ben_free(key); + if (val == NULL) + return mismatch(ctx); + + if (seek_char(ctx)) + return -1; + if (ben_current_char(ctx) != ':') + return invalid(ctx); + ctx->off++; + + if (unpack(val, ctx, vl)) + return -1; + + if (seek_char(ctx)) + return -1; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != '}') + return invalid(ctx); + } + return 0; +} + +static int unpack_list(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + const struct bencode_list *list; + size_t i = 0; + + if (b->type != BENCODE_LIST) + return mismatch(ctx); + list = ben_list_const_cast(b); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + return -1; + + if (ben_current_char(ctx) == ']') { + ctx->off++; + break; + } + if (i >= list->n) + return mismatch(ctx); + if (unpack(list->values[i], ctx, vl)) + return -1; + i++; + + if (seek_char(ctx)) + return -1; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != ']') + return invalid(ctx); + } + if (i != list->n) + return mismatch(ctx); + return 0; +} + +static int 
unpack(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + if (seek_char(ctx)) + return insufficient(ctx); + + switch (ben_current_char(ctx)) { + case '{': + return unpack_dict(b, ctx, vl); + case '[': + return unpack_list(b, ctx, vl); + case '%': + return unpack_value(b, ctx, vl); + default: + break; + } + return -1; +} + +static int unpack_all(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + if (unpack(b, ctx, vl)) + return -1; + /* check for left over characters */ + seek_char(ctx); + ctx->error = 0; + if (ctx->off < ctx->len) + return invalid(ctx); + return 0; +} + +int ben_unpack(const struct bencode *b, const char *fmt, ...) +{ + struct ben_decode_ctx ctx = {.data = fmt, .len = strlen(fmt)}; + int ret; + va_list vl; + va_start(vl, fmt); + ret = unpack_all(b, &ctx, &vl); + va_end(vl); + return ret; +} + +int ben_unpack2(const struct bencode *b, size_t *off, struct bencode_error *error, const char *fmt, ...) +{ + struct ben_decode_ctx ctx = {.data = fmt, .len = strlen(fmt)}; + int ret; + va_list vl; + va_start(vl, fmt); + ret = unpack_all(b, &ctx, &vl); + va_end(vl); + + *off = ctx.off; + if (error != NULL) { + assert((ret == 0) ^ (ctx.error != 0)); + error->error = ctx.error; + if (ret != 0) { + error->off = 0; + error->line = 0; + } else { + error->off = ctx.off; + error->line = ctx.line; + } + } + return 0; +} + +static struct bencode *pack_pointer(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *b = NULL; + + ctx->off++; + + if (ctx->off >= ctx->len) + return ben_insufficient_ptr(ctx); + + switch (ben_current_char(ctx)) { + case 'b': /* %pb */ + ctx->off++; + b = va_arg(*vl, struct bencode *); + break; + default: + return ben_invalid_ptr(ctx); + } + return b; +} + +static struct bencode *pack_value(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *b = NULL; + unsigned long long ull; + long long val; + int longflag = 0; + + ctx->off++; + + while (ctx->off < ctx->len) { + switch (ben_current_char(ctx)) { + case 'l': + ctx->off++; + longflag++; + break; + case 'L': + case 'q': + ctx->off++; + longflag = 2; + break; + + case 's': + ctx->off++; + b = ben_str(va_arg(*vl, const char *)); + if (b == NULL) + return ben_oom_ptr(ctx); + break; + + case 'p': + b = pack_pointer(ctx, vl); + break; + + /* signed */ + case 'd': + ctx->off++; + switch (longflag) { + case 0: + val = va_arg(*vl, int); + break; + case 1: + val = va_arg(*vl, long); + break; + case 2: + val = va_arg(*vl, long long); + break; + default: + return ben_invalid_ptr(ctx); + } + b = ben_int(val); + if (b == NULL) + return ben_oom_ptr(ctx); + break; + + /* unsigned */ + case 'u': + ctx->off++; + switch (longflag) { + case 0: + val = va_arg(*vl, unsigned int); + break; + case 1: + val = va_arg(*vl, unsigned long); + break; + case 2: + ull = va_arg(*vl, unsigned long long); + /* Check that no information was lost */ + val = ull; + if ((long long) ull != val) + return ben_invalid_ptr(ctx); + break; + default: + return ben_invalid_ptr(ctx); + } + b = ben_int(val); + if (b == NULL) + return ben_oom_ptr(ctx); + break; + + default: + return ben_invalid_ptr(ctx); + } + if (b) + return b; + } + return ben_insufficient_ptr(ctx); +} + +static struct bencode *pack_dict(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *d = ben_dict(); + struct bencode *key = NULL; + struct bencode *value = NULL; + + if (d == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + + if (ben_current_char(ctx) == '}') { + 
ctx->off++; + break; + } + key = pack(ctx, vl); + if (key == NULL) + goto nullpath; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) != ':') + goto invalidpath; + ctx->off++; + + value = pack(ctx, vl); + if (value == NULL) + goto nullpath; + + if (ben_dict_set(d, key, value)) { + ben_free(key); + ben_free(value); + ben_free(d); + return ben_oom_ptr(ctx); + } + key = NULL; + value = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != '}') + goto invalidpath; + } + return d; + +nullpath: + ben_free(d); + ben_free(key); + ben_free(value); + return NULL; + +invalidpath: + ben_free(d); + ben_free(key); + ben_free(value); + return ben_invalid_ptr(ctx); +} + +static struct bencode *pack_list(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *l = ben_list(); + struct bencode *val = NULL; + + if (l == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + + if (ben_current_char(ctx) == ']') { + ctx->off++; + break; + } + val = pack(ctx, vl); + if (val == NULL) + goto nullpath; + + if (ben_list_append(l, val)) { + ben_free(val); + ben_free(l); + return ben_oom_ptr(ctx); + } + val = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != ']') { + ben_free(l); + return ben_invalid_ptr(ctx); + } + } + + return l; + +nullpath: + ben_free(l); + ben_free(val); + return NULL; +} + +static struct bencode *pack(struct ben_decode_ctx *ctx, va_list *vl) +{ + if (seek_char(ctx)) + return ben_insufficient_ptr(ctx); + + switch (ben_current_char(ctx)) { + case '\'': + case '"': + return decode_printed_str(ctx); + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + return decode_printed_int(ctx); + case 'F': + case 'T': + return decode_printed_bool(ctx); + case '{': + return pack_dict(ctx, vl); + case '[': + return pack_list(ctx, vl); + case '%': + return pack_value(ctx, vl); + default: + return ben_invalid_ptr(ctx); + } + return NULL; +} + +struct bencode *ben_pack(const char *fmt, ...) +{ + struct ben_decode_ctx ctx = {.data = fmt, .len = strlen(fmt)}; + struct bencode *b; + va_list vl; + va_start(vl, fmt); + b = pack(&ctx, &vl); + va_end(vl); + + /* check for left over characters */ + seek_char(&ctx); + if (ctx.off < ctx.len) { + ben_free(b); + return NULL; + } + return b; +} diff --git a/libs/libks/src/include/ks.h b/libs/libks/src/include/ks.h index 16dffd024a..90c06879eb 100644 --- a/libs/libks/src/include/ks.h +++ b/libs/libks/src/include/ks.h @@ -34,44 +34,54 @@ #ifndef _KS_H_ #define _KS_H_ -#include +#ifdef __cplusplus +#define KS_BEGIN_EXTERN_C extern "C" { +#define KS_END_EXTERN_C } +#else +#define KS_BEGIN_EXTERN_C +#define KS_END_EXTERN_C +#endif + +#include #include -#ifdef __cplusplus -extern "C" { -#endif /* defined(__cplusplus) */ - -#define ks_copy_string(_x, _y, _z) strncpy(_x, _y, _z - 1) -#define ks_set_string(_x, _y) ks_copy_string(_x, _y, sizeof(_x)) - - -#if (_MSC_VER >= 1400) // VC8+ -#define ks_assert(expr) assert(expr);__analysis_assume( expr ) -#endif - -#ifndef ks_assert -#define ks_assert(_x) assert(_x) -#endif - -#define ks_safe_free(_x) if (_x) free(_x); _x = NULL -#define ks_strlen_zero(s) (!s || *(s) == '\0') -#define ks_strlen_zero_buf(s) (*(s) == '\0') -#define end_of(_s) *(*_s == '\0' ? 
_s : _s + strlen(_s) - 1) - -#include "math.h" -#include "ks_json.h" +KS_BEGIN_EXTERN_C #define BUF_CHUNK 65536 * 50 #define BUF_START 65536 * 100 -#include -#include +/*! + \brief Test for NULL or zero length string + \param s the string to test + \return true value if the string is NULL or zero length +*/ +_Check_return_ static __inline int _zstr(_In_opt_z_ const char *s) +{ + return !s || *s == '\0'; +} +#ifdef _PREFAST_ +#define zstr(x) (_zstr(x) ? 1 : __analysis_assume(x),0) +#else +#define zstr(x) _zstr(x) +#endif +#define ks_strlen_zero(x) zstr(x) +#define ks_strlen_zero_buf(x) zstr_buf(x) +#define zstr_buf(s) (*(s) == '\0') + +#define ks_set_string(_x, _y) ks_copy_string(_x, _y, sizeof(_x)) +#define ks_safe_free(_x) if (_x) free(_x); _x = NULL +#define end_of(_s) *(*_s == '\0' ? _s : _s + strlen(_s) - 1) #define ks_test_flag(obj, flag) ((obj)->flags & flag) #define ks_set_flag(obj, flag) (obj)->flags |= (flag) #define ks_clear_flag(obj, flag) (obj)->flags &= ~(flag) +#define ks_recv(_h) ks_recv_event(_h, 0, NULL) +#define ks_recv_timed(_h, _ms) ks_recv_event_timed(_h, _ms, 0, NULL) - +KS_DECLARE(ks_status_t) ks_init(void); +KS_DECLARE(ks_status_t) ks_shutdown(void); +KS_DECLARE(ks_pool_t *) ks_global_pool(void); +KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_fn_t fn, void *arg); KS_DECLARE(int) ks_vasprintf(char **ret, const char *fmt, va_list ap); KS_DECLARE_DATA extern ks_logger_t ks_log; @@ -81,42 +91,46 @@ KS_DECLARE(void) ks_global_set_logger(ks_logger_t logger); /*! Sets the default log level for libks */ KS_DECLARE(void) ks_global_set_default_logger(int level); - -#include "ks_threadmutex.h" -#include "ks_config.h" -#include "ks_buffer.h" -#include "mpool.h" -#include "simclist.h" -#include "table.h" - KS_DECLARE(size_t) ks_url_encode(const char *url, char *buf, size_t len); -KS_DECLARE(char *)ks_url_decode(char *s); -KS_DECLARE(const char *)ks_stristr(const char *instr, const char *str); +KS_DECLARE(char *) ks_url_decode(char *s); +KS_DECLARE(const char *) ks_stristr(const char *instr, const char *str); KS_DECLARE(int) ks_toupper(int c); KS_DECLARE(int) ks_tolower(int c); +KS_DECLARE(char *) ks_copy_string(char *from_str, const char *to_str, ks_size_t from_str_len); KS_DECLARE(int) ks_snprintf(char *buffer, size_t count, const char *fmt, ...); +KS_DECLARE(unsigned int) ks_separate_string(char *buf, const char *delim, char **array, unsigned int arraylen); +KS_DECLARE(int) ks_cpu_count(void); + static __inline__ int ks_safe_strcasecmp(const char *s1, const char *s2) { + if (!(s1 && s2)) { + return 1; + } - -KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags); - -KS_DECLARE(unsigned int) ks_separate_string_string(char *buf, const char *delim, char **array, unsigned int arraylen); - -#define ks_recv(_h) ks_recv_event(_h, 0, NULL) -#define ks_recv_timed(_h, _ms) ks_recv_event_timed(_h, _ms, 0, NULL) - -static __inline__ int ks_safe_strcasecmp(const char *s1, const char *s2) -{ - if (!(s1 && s2)) { - return 1; + return strcasecmp(s1, s2); } - return strcasecmp(s1, s2); -} -#ifdef __cplusplus -} -#endif /* defined(__cplusplus) */ +KS_DECLARE(void) ks_random_string(char *buf, uint16_t len, char *set); +#include "ks_pool.h" +#include "ks_printf.h" +#include "ks_json.h" +#include "ks_threadmutex.h" +#include "ks_hash.h" +#include "ks_config.h" +#include "ks_q.h" +#include "ks_buffer.h" +#include "ks_time.h" +#include "ks_socket.h" +#include "ks_dso.h" +#include "ks_dht.h" +#include "ks_utp.h" +#include "simclist.h" +#include "ks_ssl.h" 
+#include "kws.h" +#include "ks_bencode.h" +#include "ks_rng.h" + +KS_END_EXTERN_C #endif /* defined(_KS_H_) */ diff --git a/libs/libks/src/include/ks_bencode.h b/libs/libks/src/include/ks_bencode.h new file mode 100644 index 0000000000..d2257795f6 --- /dev/null +++ b/libs/libks/src/include/ks_bencode.h @@ -0,0 +1,730 @@ +/* + * libbencodetools + * + * Written by Heikki Orsila and + * Janne Kulmala in 2011. + */ + +#ifndef TYPEVALIDATOR_BENCODE_H +#define TYPEVALIDATOR_BENCODE_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Used to verify format strings in compile time */ +#ifdef __GNUC__ +#define BEN_CHECK_FORMAT(...) __attribute__ ((format( __VA_ARGS__ ))) +#else +#define BEN_CHECK_FORMAT(args) +#endif + +enum { + BENCODE_BOOL = 1, + BENCODE_DICT, + BENCODE_INT, + BENCODE_LIST, + BENCODE_STR, + BENCODE_USER, +}; + +enum { + BEN_OK = 0, /* No errors. Set to zero. Non-zero implies an error. */ + BEN_INVALID, /* Invalid data was given to decoder */ + BEN_INSUFFICIENT, /* Insufficient amount of data for decoding */ + BEN_NO_MEMORY, /* Memory allocation failed */ + BEN_MISMATCH, /* A given structure did not match unpack format */ +}; + +struct bencode { + char type; +}; + +struct bencode_bool { + char type; + char b; +}; + +struct bencode_dict_node { + long long hash; + struct bencode *key; + struct bencode *value; + size_t next; +}; + +struct bencode_dict { + char type; + char shared; /* non-zero means that the internal data is shared with + other instances and should not be freed */ + size_t n; + size_t alloc; + size_t *buckets; + struct bencode_dict_node *nodes; +}; + +struct bencode_int { + char type; + long long ll; +}; + +struct bencode_list { + char type; + char shared; /* non-zero means that the internal data is shared with + other instances and should not be freed */ + size_t n; + size_t alloc; + struct bencode **values; +}; + +struct bencode_str { + char type; + size_t len; + char *s; +}; + +struct ben_decode_ctx; +struct ben_encode_ctx; + +struct bencode_type { + size_t size; + struct bencode *(*decode) (struct ben_decode_ctx *ctx); + int (*encode) (struct ben_encode_ctx *ctx, const struct bencode *b); + size_t (*get_size) (const struct bencode *b); + void (*free) (struct bencode *b); + int (*cmp) (const struct bencode *a, const struct bencode *b); +}; + +struct bencode_user { + char type; + struct bencode_type *info; +}; + +struct bencode_error { + int error; /* 0 if no errors */ + int line; /* Error line: 0 is the first line */ + size_t off; /* Error offset in bytes from the start */ +}; + +/* Allocate an instance of a user-defined type */ +void *ben_alloc_user(struct bencode_type *type); + +/* + * Try to set capacity of a list or a dict to 'n' objects. + * The function does nothing if 'n' is less than or equal to the number of + * objects in 'b'. That is, nothing happens if n <= ben_{dict|list}_len(b). + * + * This function is used only for advice. The implementation need not obey it. + * + * The function returns 0 if the new capacity is used, otherwise -1. + * + * Note: This can be used to make construction of lists and dicts + * more efficient when the number of inserted items is known in advance. + */ +int ben_allocate(struct bencode *b, size_t n); + +/* + * Returns an identical but a separate copy of structure b. Returns NULL if + * there is no memory to make a copy. The copy is made recursively. + */ +struct bencode *ben_clone(const struct bencode *b); + +/* + * Returns a weak reference copy of structure b. 
Only a minimum amount of
+ * data is copied because the returned structure references the same
+ * internal data as the original structure. As a result, the original
+ * structure must remain valid until the copy is destroyed.
+ *
+ * This function is used as an optimization for special cases.
+ */
+struct bencode *ben_shared_clone(const struct bencode *b);
+
+/*
+ * ben_cmp() is similar to strcmp(). It compares integers, strings and lists
+ * similarly to Python. User-defined types can also be compared.
+ * Note: an integer is always less than a string.
+ *
+ * ben_cmp(a, b) returns a negative value if "a < b", 0 if "a == b",
+ * or a positive value if "a > b".
+ *
+ * The algorithm for comparing dictionaries is:
+ * If 'a' and 'b' have a different number of keys or their keys differ,
+ * a non-zero value is returned. Otherwise, they have the exact same keys
+ * and comparison is done in ben_cmp() order of keys. The value for each key
+ * is compared, and the first unequal value (ben_cmp() != 0) defines the
+ * return value of the comparison.
+ *
+ * Note: nested dictionaries are compared with the same rules, recursively.
+ */
+int ben_cmp(const struct bencode *a, const struct bencode *b);
+
+/* Same as ben_cmp(), but the second argument is a C string */
+int ben_cmp_with_str(const struct bencode *a, const char *s);
+
+/*
+ * Comparison function suitable for qsort(). Uses ben_cmp(), so this can be
+ * used to order both integer and string arrays.
+ */
+int ben_cmp_qsort(const void *a, const void *b);
+
+/*
+ * Decode 'data' with 'len' bytes of data. Returns NULL on error.
+ * The encoded data must be exactly 'len' bytes (not less), otherwise NULL
+ * is returned. The ben_decode2() function supports partial decoding ('len' is
+ * larger than the actual decoded message) and gives more accurate error reports.
+ */
+struct bencode *ben_decode(const void *data, size_t len);
+
+/*
+ * Same as ben_decode(), but allows one to set the start offset for decoding
+ * with 'off' and reports errors more accurately.
+ *
+ * '*off' must point to the decoding start offset inside 'data'.
+ * If decoding is successful, '*off' is updated to point to the next byte
+ * after the decoded message.
+ *
+ * If 'error != NULL', it is updated according to the success or failure of
+ * the decoding. BEN_OK means success, BEN_INVALID means invalid data.
+ * BEN_INSUFFICIENT means the data is invalid but could be valid if more data
+ * was given for decoding. BEN_NO_MEMORY means decoding ran out of memory.
+ */
+struct bencode *ben_decode2(const void *data, size_t len, size_t *off, int *error);
+
+/*
+ * Same as ben_decode2(), but allows one to define user types.
+ */
+struct bencode *ben_decode3(const void *data, size_t len, size_t *off, int *error, struct bencode_type *types[128]);
+
+/*
+ * Same as ben_decode(), but decodes data encoded with ben_print(). This is
+ * whitespace tolerant, so intended Python syntax can also be read.
+ * The decoder skips comments that begin with a '#' character.
+ * A comment starts at the '#' character and ends at the end of the same line.
+ *
+ * For example, this can be used to read in config files written as a Python
+ * dictionary.
+ *
+ * ben_decode_printed2() fills information about the error in
+ * struct bencode_error.
+ * error->error is 0 on success, otherwise it is an error code
+ * (see ben_decode2()).
+ * error->line is the line number where the error occurred.
+ * error->off is the byte offset of the error (approximation).
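+ *
+ * A short illustrative sketch (the config snippet and the 'data'/'len'
+ * variables are made-up examples, not part of the API): input such as
+ *
+ *	# listener settings
+ *	{'host': 'example.net', 'port': 8080}
+ *
+ * could be read with
+ *
+ *	struct bencode_error err;
+ *	size_t off = 0;
+ *	struct bencode *cfg = ben_decode_printed2(data, len, &off, &err);
+ *	if (cfg == NULL)
+ *		fprintf(stderr, "parse error on line %d\n", err.line);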
+ */
+struct bencode *ben_decode_printed(const void *data, size_t len);
+struct bencode *ben_decode_printed2(const void *data, size_t len, size_t *off, struct bencode_error *error);
+
+/* Get the serialization size of bencode structure 'b' */
+size_t ben_encoded_size(const struct bencode *b);
+
+/* Encode 'b'. Returns a pointer to the encoded data; the length is stored in '*len' */
+void *ben_encode(size_t *len, const struct bencode *b);
+
+/*
+ * Encode 'b' into the 'data' buffer with at most 'maxlen' bytes.
+ * Returns the size of the encoded data.
+ */
+size_t ben_encode2(char *data, size_t maxlen, const struct bencode *b);
+
+/*
+ * You must use ben_free() for all allocated bencode structures after use.
+ * If b == NULL, ben_free() does nothing.
+ *
+ * ben_free() frees all the objects contained within the bencoded structure.
+ * It recursively iterates over lists and dictionaries and frees objects.
+ */
+void ben_free(struct bencode *b);
+
+long long ben_str_hash(const struct bencode *b);
+long long ben_int_hash(const struct bencode *b);
+long long ben_hash(const struct bencode *b);
+
+/* Create a string from binary data with len bytes */
+struct bencode *ben_blob(const void *data, size_t len);
+
+/* Create a boolean from an integer */
+struct bencode *ben_bool(int b);
+
+/* Create an empty dictionary */
+struct bencode *ben_dict(void);
+
+/*
+ * Try to locate 'key' in the dictionary. Returns the associated value, if found.
+ * Returns NULL if the key does not exist.
+ */
+struct bencode *ben_dict_get(const struct bencode *d, const struct bencode *key);
+
+struct bencode *ben_dict_get_by_str(const struct bencode *d, const char *key);
+struct bencode *ben_dict_get_by_int(const struct bencode *d, long long key);
+
+struct bencode_keyvalue {
+	struct bencode *key;
+	struct bencode *value;
+};
+
+/*
+ * Returns an array of key-value pairs in key order as defined by ben_cmp().
+ * Array elements are struct bencode_keyvalue members. Returns NULL if
+ * the array cannot be allocated or the bencode object is not a dictionary.
+ * The returned array must be freed by using free(). The length of the
+ * array can be determined with ben_dict_len(d).
+ *
+ * Warning: key and value pointers in the array point to the exact same
+ * objects in the dictionary. Therefore, the dictionary and its key-values
+ * must exist while the same keys and values are accessed from the array.
+ */
+struct bencode_keyvalue *ben_dict_ordered_items(const struct bencode *d);
+
+/*
+ * Try to locate 'key' in the dictionary and remove it. Returns the associated
+ * value, if found. The value must later be freed with ben_free(). Returns
+ * NULL if the key does not exist.
+ */
+struct bencode *ben_dict_pop(struct bencode *d, const struct bencode *key);
+
+struct bencode *ben_dict_pop_by_str(struct bencode *d, const char *key);
+struct bencode *ben_dict_pop_by_int(struct bencode *d, long long key);
+
+/*
+ * Set 'key' in the dictionary to 'value'. An old value for the key, if one
+ * exists, is freed. 'key' and 'value' are owned by the dictionary
+ * after a successful call (one may not call ben_free() for 'key' or
+ * 'value'). One may free 'key' and 'value' if the call is unsuccessful.
+ *
+ * Returns 0 on success, -1 on failure (no memory).
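+ *
+ * A minimal ownership sketch (illustrative only; 'd' is assumed to be a
+ * dictionary created with ben_dict()): free the objects only when the call
+ * did not succeed.
+ *
+ *	struct bencode *key = ben_str("name");
+ *	struct bencode *value = ben_int(42);
+ *	if (key == NULL || value == NULL || ben_dict_set(d, key, value)) {
+ *		ben_free(key);
+ *		ben_free(value);
+ *		... report failure; the objects were never owned by 'd' ...
+ *	}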
+ */
+int ben_dict_set(struct bencode *d, struct bencode *key, struct bencode *value);
+
+/* Same as ben_dict_set(), but the key is a C string */
+int ben_dict_set_by_str(struct bencode *d, const char *key, struct bencode *value);
+
+/* Same as ben_dict_set(), but the key and value are C strings */
+int ben_dict_set_str_by_str(struct bencode *d, const char *key, const char *value);
+
+struct bencode *ben_int(long long ll);
+
+/* Create an empty list */
+struct bencode *ben_list(void);
+
+/*
+ * Append 'b' to 'list'. Returns 0 on success, -1 on failure (no memory).
+ * One may not call ben_free(b) after a successful call, because the list owns
+ * the object 'b'.
+ */
+int ben_list_append(struct bencode *list, struct bencode *b);
+
+int ben_list_append_str(struct bencode *list, const char *s);
+int ben_list_append_int(struct bencode *list, long long ll);
+
+/* Remove and return the value at position 'pos' in the list */
+struct bencode *ben_list_pop(struct bencode *list, size_t pos);
+
+/*
+ * Returns a Python formatted C string representation of 'b' on success,
+ * NULL on failure. The returned string should be freed with free().
+ *
+ * Note: The string is terminated with '\0'. All instances of '\0' bytes in
+ * the bencoded data are escaped so that there is only one '\0' byte
+ * in the generated string, at the end.
+ */
+char *ben_print(const struct bencode *b);
+
+/* Create a string from a C string (note: a bencode string may contain '\0') */
+struct bencode *ben_str(const char *s);
+
+/* Return a human-readable explanation of an error returned by ben_decode2() */
+const char *ben_strerror(int error);
+
+/*
+ * Unpack a Bencoded structure similarly to scanf(). Takes a format string and
+ * a list of pointers as variable arguments. The given structure 'b' is checked
+ * against the format and values are unpacked using the given specifiers.
+ * A specifier begins with a percent (%) followed by a string of specifier
+ * characters documented below.
+ * The syntax is similar to the Python format for recursive data structures, and
+ * consists of the tokens {, }, [, ] with any number of spaces between them.
+ * The keys of a dictionary are given as literal strings or integers and
+ * matched against the keys of the Bencoded structure.
+ *
+ * Unpack modifiers:
+ * l	The integer is of type long or unsigned long, and the type of the
+ *	argument is expected to be long * or unsigned long *.
+ * ll	The integer is a long long or an unsigned long long, and the
+ *	argument is long long * or unsigned long long *.
+ * L	Same as ll.
+ * q	Same as ll.
+ *
+ * Unpack specifiers:
+ * %ps	The Bencode value must be a string and a pointer to a string
+ *	(char **) is expected to be given as an argument. Note, returns a
+ *	reference to the internal string buffer. The returned memory should
+ *	not be freed and it has the same lifetime as the Bencode string.
+ *
+ * %pb	Takes any structure and writes a pointer given as an argument.
+ *	The argument is expected to be "struct bencode **". Note, returns a
+ *	reference to the value inside the structure passed to ben_unpack().
+ *	The returned memory should not be freed and it has the same
+ *	lifetime as the original structure.
+ *
+ * %d	The bencode value is expected to be a (signed) integer. The
+ *	preceding conversion modifiers define the type of the given
+ *	pointer.
+ *
+ * %u	The bencode value is expected to be an unsigned integer. The
+ *	preceding conversion modifiers define the type of the given
+ *	pointer.
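+ *
+ * A brief usage sketch (illustrative only; it assumes 'b' is a dictionary
+ * that carries a string under "host" and an integer under "port"):
+ *
+ *	const char *host;
+ *	long long port;
+ *	if (ben_unpack(b, "{'host': %ps, 'port': %lld}", &host, &port) != 0)
+ *		... the structure did not match the format ...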
+ */ +int ben_unpack(const struct bencode *b, const char *fmt, ...) + BEN_CHECK_FORMAT(scanf, 2, 3); + +int ben_unpack2(const struct bencode *b, size_t *off, struct bencode_error *error, const char *fmt, ...) + BEN_CHECK_FORMAT(scanf, 4, 5); + +/* + * Pack a Bencoded structure similar to printf(). Takes a format string and + * a list of values as variable arguments. + * Works similarly to ben_decode_printed(), but allows the string to values + * specifiers which are replaced with values given as arguments. + * A specifier begins with a percent (%) that follows a string of specifier + * characters documented below. + * + * Value modifiers: + * l The integer is of type long or unsigned long. + * ll The integer is a long long or an unsigned long long. + * L Same as ll. + * q Same as ll. + * + * Value specifiers: + * %s A string pointer (char *) expected to be given as argument. A new + * Bencode string is constructed from the given string. + * + * %pb A Bencode structure (struct bencode *) is expected to be given as + * argument. Note, takes ownership of the structure, even when an + * error is returned. + * + * %d Constructs a new integer from the given (signed) integer. The + * preceeding conversion modifiers define the type of the value. + * + * %u Constructs a new integer from the given unsigned integer. The + * preceeding conversion modifiers define the type of the value. + */ +struct bencode *ben_pack(const char *fmt, ...) + BEN_CHECK_FORMAT(printf, 1, 2); + +/* ben_is_bool() returns 1 iff b is a boolean, 0 otherwise */ +static inline int ben_is_bool(const struct bencode *b) +{ + return b->type == BENCODE_BOOL; +} +static inline int ben_is_dict(const struct bencode *b) +{ + return b->type == BENCODE_DICT; +} +static inline int ben_is_int(const struct bencode *b) +{ + return b->type == BENCODE_INT; +} +static inline int ben_is_list(const struct bencode *b) +{ + return b->type == BENCODE_LIST; +} +static inline int ben_is_str(const struct bencode *b) +{ + return b->type == BENCODE_STR; +} +static inline int ben_is_user(const struct bencode *b) +{ + return b->type == BENCODE_USER; +} + +/* + * ben_bool_const_cast(b) returns "(const struct bencode_bool *) b" if the + * underlying object is a boolean, NULL otherwise. + */ +static inline const struct bencode_bool *ben_bool_const_cast(const struct bencode *b) +{ + return b->type == BENCODE_BOOL ? ((const struct bencode_bool *) b) : NULL; +} + +/* + * ben_bool_cast(b) returns "(struct bencode_bool *) b" if the + * underlying object is a boolean, NULL otherwise. + */ +static inline struct bencode_bool *ben_bool_cast(struct bencode *b) +{ + return b->type == BENCODE_BOOL ? ((struct bencode_bool *) b) : NULL; +} + +static inline const struct bencode_dict *ben_dict_const_cast(const struct bencode *b) +{ + return b->type == BENCODE_DICT ? ((const struct bencode_dict *) b) : NULL; +} +static inline struct bencode_dict *ben_dict_cast(struct bencode *b) +{ + return b->type == BENCODE_DICT ? ((struct bencode_dict *) b) : NULL; +} + +static inline const struct bencode_int *ben_int_const_cast(const struct bencode *i) +{ + return i->type == BENCODE_INT ? ((const struct bencode_int *) i) : NULL; +} +static inline struct bencode_int *ben_int_cast(struct bencode *i) +{ + return i->type == BENCODE_INT ? ((struct bencode_int *) i) : NULL; +} + +static inline const struct bencode_list *ben_list_const_cast(const struct bencode *list) +{ + return list->type == BENCODE_LIST ? 
((const struct bencode_list *) list) : NULL; +} +static inline struct bencode_list *ben_list_cast(struct bencode *list) +{ + return list->type == BENCODE_LIST ? ((struct bencode_list *) list) : NULL; +} + +static inline const struct bencode_str *ben_str_const_cast(const struct bencode *str) +{ + return str->type == BENCODE_STR ? ((const struct bencode_str *) str) : NULL; +} +static inline struct bencode_str *ben_str_cast(struct bencode *str) +{ + return str->type == BENCODE_STR ? ((struct bencode_str *) str) : NULL; +} + +static inline const struct bencode_user *ben_user_const_cast(const struct bencode *user) +{ + return user->type == BENCODE_USER ? ((const struct bencode_user *) user) : NULL; +} +static inline struct bencode_user *ben_user_cast(struct bencode *user) +{ + return user->type == BENCODE_USER ? ((struct bencode_user *) user) : NULL; +} + +static inline int ben_is_user_type(const struct bencode *b, struct bencode_type *type) +{ + return b->type == BENCODE_USER ? ((const struct bencode_user *) b)->info == type : 0; +} + +static inline const void *ben_user_type_const_cast(const struct bencode *b, struct bencode_type *type) +{ + return (b->type == BENCODE_USER && ((const struct bencode_user *) b)->info == type) ? b : NULL; +} +static inline void *ben_user_type_cast(struct bencode *b, struct bencode_type *type) +{ + return (b->type == BENCODE_USER && ((const struct bencode_user *) b)->info == type) ? b : NULL; +} + +/* Return the number of keys in a dictionary 'b' */ +static inline size_t ben_dict_len(const struct bencode *b) +{ + return ben_dict_const_cast(b)->n; +} + +/* Return the number of items in a list 'b' */ +static inline size_t ben_list_len(const struct bencode *b) +{ + return ben_list_const_cast(b)->n; +} + +/* ben_list_get(list, i) returns object at position i in list */ +static inline struct bencode *ben_list_get(const struct bencode *list, size_t i) +{ + const struct bencode_list *l = ben_list_const_cast(list); + if (i >= l->n) { + fprintf(stderr, "bencode: List index out of bounds\n"); + abort(); + } + return l->values[i]; +} + +/* + * ben_list_set(list, i, b) sets object b to list at position i. + * The old value at position i is freed. + * The program aborts if position i is out of bounds. + */ +void ben_list_set(struct bencode *list, size_t i, struct bencode *b); + +/* Return the number of bytes in a string 'b' */ +static inline size_t ben_str_len(const struct bencode *b) +{ + return ben_str_const_cast(b)->len; +} + +/* Return boolean value (0 or 1) of 'b' */ +static inline int ben_bool_val(const struct bencode *b) +{ + return ben_bool_const_cast(b)->b ? 1 : 0; +} + +/* Return integer value of 'b' */ +static inline long long ben_int_val(const struct bencode *b) +{ + return ben_int_const_cast(b)->ll; +} + +/* + * Note: the string is always zero terminated. Also, the string may + * contain more than one zero. + * bencode strings are not compatible with C strings. + */ +static inline const char *ben_str_val(const struct bencode *b) +{ + return ben_str_const_cast(b)->s; +} + +/* + * ben_list_for_each() is an iterator macro for bencoded lists. + * + * Note, it is not allowed to change the list while iterating except by + * using ben_list_pop_current(). + * + * pos is a size_t. 
+ *
+ * Example:
+ *
+ *	size_t pos;
+ *	struct bencode *list = xxx;
+ *	struct bencode *value;
+ *	ben_list_for_each(value, pos, list) {
+ *		inspect(value);
+ *	}
+ */
+#define ben_list_for_each(value, pos, l) \
+	for ((pos) = (size_t) 0; \
+	     (pos) < (ben_list_const_cast(l))->n && \
+	     ((value) = ((const struct bencode_list *) (l))->values[(pos)]) != NULL ; \
+	     (pos)++)
+
+/*
+ * ben_list_pop_current() returns and removes the current item at 'pos'
+ * while iterating the list with ben_list_for_each().
+ * It can be used more than once per walk, but only once per item.
+ * Example below:
+ *
+ * Filter out all items from the list whose string value does not begin with "foo".
+ *
+ *	ben_list_for_each(value, pos, list) {
+ *		if (strncmp(ben_str_val(value), "foo", 3) != 0)
+ *			ben_free(ben_list_pop_current(list, &pos));
+ *	}
+ */
+static inline struct bencode *ben_list_pop_current(struct bencode *list,
+						   size_t *pos)
+{
+	struct bencode *value = ben_list_pop(list, *pos);
+	(*pos)--;
+	return value;
+}
+
+/*
+ * ben_dict_for_each() is an iterator macro for bencoded dictionaries.
+ *
+ * Note, it is not allowed to change the dictionary while iterating except
+ * by using ben_dict_pop_current().
+ *
+ *	struct bencode *dict = ben_dict();
+ *	size_t pos;
+ *	struct bencode *key;
+ *	struct bencode *value;
+ *	ben_dict_set_str_by_str(dict, "foo", "bar");
+ *
+ *	ben_dict_for_each(key, value, pos, dict) {
+ *		use(key, value);
+ *	}
+ *
+ * pos is a size_t.
+ */
+#define ben_dict_for_each(bkey, bvalue, pos, d) \
+	for ((pos) = 0; \
+	     (pos) < (ben_dict_const_cast(d))->n && \
+	     ((bkey) = ((const struct bencode_dict *) (d))->nodes[(pos)].key) != NULL && \
+	     ((bvalue) = ((const struct bencode_dict *) (d))->nodes[(pos)].value) != NULL; \
+	     (pos)++)
+
+/*
+ * ben_dict_pop_current() deletes the current item at 'pos' while iterating
+ * the dictionary with ben_dict_for_each(). It can be used more than once
+ * per walk, but only once per item. Example below:
+ *
+ * Filter out all items from the dictionary whose key does not begin with "foo".
+ *
+ *	ben_dict_for_each(key, value, pos, dict) {
+ *		if (strncmp(ben_str_val(key), "foo", 3) != 0)
+ *			ben_free(ben_dict_pop_current(dict, &pos));
+ *	}
+ */
+struct bencode *ben_dict_pop_current(struct bencode *dict, size_t *pos);
+
+/* Report an error while decoding. Returns NULL. */
+void *ben_insufficient_ptr(struct ben_decode_ctx *ctx);
+void *ben_invalid_ptr(struct ben_decode_ctx *ctx);
+void *ben_oom_ptr(struct ben_decode_ctx *ctx);
+
+/*
+ * Decode from the current position of 'ctx'.
+ *
+ * This function is used to implement decoders for user-defined types.
+ */
+struct bencode *ben_ctx_decode(struct ben_decode_ctx *ctx);
+
+/*
+ * Test whether the input of 'ctx' has at least n bytes left.
+ * Returns 0 when there are enough bytes left and -1 when there are not.
+ *
+ * This function is used to implement decoders for user-defined types.
+ */
+int ben_need_bytes(const struct ben_decode_ctx *ctx, size_t n);
+
+/*
+ * Returns the character at the current position of 'ctx'.
+ *
+ * This function is used to implement decoders for user-defined types.
+ */
+char ben_current_char(const struct ben_decode_ctx *ctx);
+
+/*
+ * Get the next n bytes from the input.
+ * Returns a pointer to the data, or NULL when there aren't enough bytes left.
+ *
+ * This function is used to implement decoders for user-defined types.
+ */
+const char *ben_current_buf(const struct ben_decode_ctx *ctx, size_t n);
+
+/*
+ * Increments the current position by n.
+ * + * This function is used to implement decoders for user-defined types. + */ +void ben_skip(struct ben_decode_ctx *ctx, size_t n); + +/* + * Encode to the output of 'ctx'. The size of the encoded data can be obtained + * with ben_encoded_size(). + * + * This function is used to implement encoders for user-defined types. + */ +int ben_ctx_encode(struct ben_encode_ctx *ctx, const struct bencode *b); + +/* + * Append one character to output of 'ctx'. The amount of bytes written to the + * output must be the same as returned by get_size(). + * + * This function is used to implement encoders for user-defined types. + */ +int ben_put_char(struct ben_encode_ctx *ctx, char c); + +/* + * Append data to output of 'ctx'. The amount of bytes written to the output + * must be the same as returned by get_size(). + * + * This function is used to implement encoders for user-defined types. + */ +int ben_put_buffer(struct ben_encode_ctx *ctx, const void *buf, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/libs/libks/src/include/ks_buffer.h b/libs/libks/src/include/ks_buffer.h index 41c08c9a97..dc28ef7fbf 100644 --- a/libs/libks/src/include/ks_buffer.h +++ b/libs/libks/src/include/ks_buffer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2012, Anthony Minessale II + * Copyright (c) 2010-2015, Anthony Minessale II * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,9 +30,14 @@ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "ks.h" + #ifndef KS_BUFFER_H #define KS_BUFFER_H + +#include "ks.h" + +KS_BEGIN_EXTERN_C + /** * @defgroup ks_buffer Buffer Routines * @ingroup buffer @@ -133,6 +138,7 @@ KS_DECLARE(ks_size_t) ks_buffer_seek(ks_buffer_t *buffer, ks_size_t datalen); KS_DECLARE(ks_size_t) ks_buffer_zwrite(ks_buffer_t *buffer, const void *data, ks_size_t datalen); +KS_END_EXTERN_C #endif /* For Emacs: * Local Variables: diff --git a/libs/libks/src/include/ks_config.h b/libs/libks/src/include/ks_config.h index 2a5a330418..bdbb9cea98 100644 --- a/libs/libks/src/include/ks_config.h +++ b/libs/libks/src/include/ks_config.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2014, Anthony Minessale II + * Copyright (c) 2007-2015, Anthony Minessale II * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -52,16 +52,12 @@ #ifndef KS_CONFIG_H #define KS_CONFIG_H +KS_BEGIN_EXTERN_C + #include "ks.h" -#ifdef __cplusplus -extern "C" { -#endif /* defined(__cplusplus) */ - - #define KS_URL_SEPARATOR "://" - #ifdef WIN32 #define KS_PATH_SEPARATOR "\\" #ifndef KS_CONFIG_DIR @@ -107,26 +103,26 @@ atoi(expr))) ? 1 : 0 typedef struct ks_config ks_config_t; /*! \brief A simple file handle representing an open configuration file **/ -struct ks_config { - /*! FILE stream buffer to the opened file */ - FILE *file; - /*! path to the file */ - char path[512]; - /*! current category */ - char category[256]; - /*! current section */ - char section[256]; - /*! buffer of current line being read */ - char buf[1024]; - /*! current line number in file */ - int lineno; - /*! current category number in file */ - int catno; - /*! current section number in file */ - int sectno; + struct ks_config { + /*! FILE stream buffer to the opened file */ + FILE *file; + /*! path to the file */ + char path[512]; + /*! current category */ + char category[256]; + /*! current section */ + char section[256]; + /*! 
buffer of current line being read */ + char buf[1024]; + /*! current line number in file */ + int lineno; + /*! current category number in file */ + int catno; + /*! current section number in file */ + int sectno; - int lockto; -}; + int lockto; + }; /*! \brief Open a configuration file @@ -134,13 +130,13 @@ struct ks_config { \param file_path path to the file \return 1 (true) on success 0 (false) on failure */ -KS_DECLARE(int) ks_config_open_file(ks_config_t * cfg, const char *file_path); + KS_DECLARE(int) ks_config_open_file(ks_config_t *cfg, const char *file_path); /*! \brief Close a previously opened configuration file \param cfg (ks_config_t *) config handle to use */ -KS_DECLARE(void) ks_config_close_file(ks_config_t * cfg); + KS_DECLARE(void) ks_config_close_file(ks_config_t *cfg); /*! \brief Retrieve next name/value pair from configuration file @@ -148,24 +144,20 @@ KS_DECLARE(void) ks_config_close_file(ks_config_t * cfg); \param var pointer to aim at the new variable name \param val pointer to aim at the new value */ -KS_DECLARE(int) ks_config_next_pair(ks_config_t * cfg, char **var, char **val); + KS_DECLARE(int) ks_config_next_pair(ks_config_t *cfg, char **var, char **val); /*! \brief Retrieve the CAS bits from a configuration string value \param strvalue pointer to the configuration string value (expected to be in format whatever:xxxx) \param outbits pointer to aim at the CAS bits */ -KS_DECLARE(int) ks_config_get_cas_bits(char *strvalue, unsigned char *outbits); + KS_DECLARE(int) ks_config_get_cas_bits(char *strvalue, unsigned char *outbits); /** @} */ -#ifdef __cplusplus -} -#endif /* defined(__cplusplus) */ - -#endif /* defined(KS_CONFIG_H) */ - +KS_END_EXTERN_C +#endif /* defined(KS_CONFIG_H) */ /* For Emacs: * Local Variables: * mode:c diff --git a/libs/libks/src/include/ks_dht.h b/libs/libks/src/include/ks_dht.h new file mode 100644 index 0000000000..2b5b9cb467 --- /dev/null +++ b/libs/libks/src/include/ks_dht.h @@ -0,0 +1,109 @@ +/* +Copyright (c) 2009-2011 by Juliusz Chroboczek + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ + +#ifndef _KS_DHT_H +#define _KS_DHT_H + +#include "ks.h" +#include "ks_bencode.h" + +KS_BEGIN_EXTERN_C + +typedef enum { + KS_DHT_EVENT_NONE = 0, + KS_DHT_EVENT_VALUES = 1, + KS_DHT_EVENT_VALUES6 = 2, + KS_DHT_EVENT_SEARCH_DONE = 3, + KS_DHT_EVENT_SEARCH_DONE6 = 4 +} ks_dht_event_t; + + +typedef enum { + DHT_PARAM_AUTOROUTE = 1 +} ks_dht_param_t; + +typedef enum { + KS_DHT_AF_INET4 = (1 << 0), + KS_DHT_AF_INET6 = (1 << 1) +} ks_dht_af_flag_t; + + +typedef void (*dht_callback_t)(void *closure, ks_dht_event_t event, const unsigned char *info_hash, const void *data, size_t data_len); + +typedef struct dht_handle_s dht_handle_t; + +KS_DECLARE(int) dht_periodic(dht_handle_t *h, const void *buf, size_t buflen, ks_sockaddr_t *from); +KS_DECLARE(ks_status_t) ks_dht_init(dht_handle_t **handle, ks_dht_af_flag_t af_flags, const unsigned char *id, unsigned int port); +KS_DECLARE(void) ks_dht_set_param(dht_handle_t *h, ks_dht_param_t param, ks_bool_t val); +KS_DECLARE(ks_status_t) ks_dht_add_ip(dht_handle_t *h, char *ip, int port); +KS_DECLARE(void) ks_dht_start(dht_handle_t *h); +KS_DECLARE(int) dht_insert_node(dht_handle_t *h, const unsigned char *id, ks_sockaddr_t *sa); +KS_DECLARE(int) dht_ping_node(dht_handle_t *h, ks_sockaddr_t *sa); +KS_DECLARE(int) dht_search(dht_handle_t *h, const unsigned char *id, int port, int af, dht_callback_t callback, void *closure); +KS_DECLARE(int) dht_nodes(dht_handle_t *h, int af, int *good_return, int *dubious_return, int *cached_return, int *incoming_return); +KS_DECLARE(ks_status_t) ks_dht_one_loop(dht_handle_t *h, int timeout); +KS_DECLARE(ks_status_t) ks_dht_get_bind_addrs(dht_handle_t *h, const ks_sockaddr_t ***addrs, ks_size_t *addrlen); +KS_DECLARE(void) ks_dht_set_callback(dht_handle_t *h, dht_callback_t callback, void *closure); +KS_DECLARE(void) ks_dht_set_port(dht_handle_t *h, unsigned int port); +KS_DECLARE(void) dht_dump_tables(dht_handle_t *h, FILE *f); +KS_DECLARE(int) dht_get_nodes(dht_handle_t *h, struct sockaddr_in *sin, int *num, struct sockaddr_in6 *sin6, int *num6); +KS_DECLARE(int) dht_uninit(dht_handle_t **h); +KS_DECLARE(void) ks_dht_set_v(dht_handle_t *h, const unsigned char *v); +KS_DECLARE(int) ks_dht_calculate_mutable_storage_target(unsigned char *pk, unsigned char *salt, int salt_length, unsigned char *target, int target_length); +KS_DECLARE(int) ks_dht_generate_mutable_storage_args(struct bencode *data, int64_t sequence, int cas, + unsigned char *id, int id_len, /* querying nodes id */ + const unsigned char *sk, const unsigned char *pk, + unsigned char *salt, unsigned long long salt_length, + unsigned char *token, unsigned long long token_length, + unsigned char *signature, unsigned long long *signature_length, + struct bencode **arguments); + +/* This must be provided by the user. 
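+ *
+ * An illustrative sketch of a minimal dht_blacklisted() implementation for an
+ * application that never blacklists peers (a real deployment may want actual
+ * blacklisting, a strong hash and a proper randomness source for the other
+ * two callbacks):
+ *
+ *	KS_DECLARE(int) dht_blacklisted(const ks_sockaddr_t *sa)
+ *	{
+ *		(void) sa;
+ *		return 0;
+ *	}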
*/ +KS_DECLARE(int) dht_blacklisted(const ks_sockaddr_t *sa); +KS_DECLARE(void) dht_hash(void *hash_return, int hash_size, const void *v1, int len1, const void *v2, int len2, const void *v3, int len3); +KS_DECLARE(int) dht_random_bytes(void *buf, size_t size); + +KS_DECLARE(int) ks_dht_send_message_mutable(dht_handle_t *h, unsigned char *sk, unsigned char *pk, char **node_id, + char *message_id, int sequence, char *message, ks_time_t life); + +KS_DECLARE(int) ks_dht_send_message_mutable_cjson(dht_handle_t *h, unsigned char *sk, unsigned char *pk, char **node_id, + char *message_id, int sequence, cJSON *message, ks_time_t life); + +typedef void (ks_dht_store_entry_json_cb)(struct dht_handle_s *h, const cJSON *msg, void *obj); +KS_DECLARE(void) ks_dht_store_entry_json_cb_set(struct dht_handle_s *h, ks_dht_store_entry_json_cb *store_json_cb, void *arg); + +KS_DECLARE(int) ks_dht_api_find_node(dht_handle_t *h, char *node_id_hex, char *target_hex, ks_bool_t ipv6); + +KS_END_EXTERN_C + +#endif /* _KS_DHT_H */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_dso.h b/libs/libks/src/include/ks_dso.h new file mode 100755 index 0000000000..35c9763872 --- /dev/null +++ b/libs/libks/src/include/ks_dso.h @@ -0,0 +1,49 @@ +/* + * Cross Platform dso/dll load abstraction + * Copyright(C) 2008 Michael Jerris + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so. + * + * This work is provided under this license on an "as is" basis, without warranty of any kind, + * either expressed or implied, including, without limitation, warranties that the covered code + * is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire + * risk as to the quality and performance of the covered code is with you. Should any covered + * code prove defective in any respect, you (not the initial developer or any other contributor) + * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty + * constitutes an essential part of this license. No use of any covered code is authorized hereunder + * except under this disclaimer. 
+ * + */ + +#include "ks.h" + +#ifndef _KS_DSO_H +#define _KS_DSO_H + +KS_BEGIN_EXTERN_C + +typedef void (*ks_func_ptr_t) (void); +typedef void * ks_dso_lib_t; + +KS_DECLARE(ks_status_t) ks_dso_destroy(ks_dso_lib_t *lib); +KS_DECLARE(ks_dso_lib_t) ks_dso_open(const char *path, char **err); +KS_DECLARE(void *) ks_dso_func_sym(ks_dso_lib_t lib, const char *sym, char **err); +KS_DECLARE(char *) ks_build_dso_path(const char *name, char *path, ks_size_t len); + +KS_END_EXTERN_C + +#endif + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ + diff --git a/libs/libks/src/include/ks_hash.h b/libs/libks/src/include/ks_hash.h new file mode 100644 index 0000000000..2f6b9da1a7 --- /dev/null +++ b/libs/libks/src/include/ks_hash.h @@ -0,0 +1,372 @@ +/* + * FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application + * + * ks_hash.h -- Ks_Hash + * + */ + + +/* ks_hash.h Copyright (C) 2002 Christopher Clark */ + +#ifndef __KS_HASH_CWC22_H__ +#define __KS_HASH_CWC22_H__ + +#ifdef _MSC_VER +#ifndef __inline__ +#define __inline__ __inline +#endif +#endif + +#include "ks.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ks_hash ks_hash_t; +typedef struct ks_hash_iterator ks_hash_iterator_t; + +typedef enum { + KS_UNLOCKED, + KS_READLOCKED +} ks_locked_t; + + +/* Example of use: + * + * ks_hash_t *h; + * struct some_key *k; + * struct some_value *v; + * + * static unsigned int hash_from_key_fn( void *k ); + * static int keys_equal_fn ( void *key1, void *key2 ); + * + * h = ks_hash_create(16, hash_from_key_fn, keys_equal_fn); + * k = (struct some_key *) malloc(sizeof(struct some_key)); + * v = (struct some_value *) malloc(sizeof(struct some_value)); + * + * (initialise k and v to suitable values) + * + * if (! ks_hash_insert(h,k,v) ) + * { exit(-1); } + * + * if (NULL == (found = ks_hash_search(h,k) )) + * { printf("not found!"); } + * + * if (NULL == (found = ks_hash_remove(h,k) )) + * { printf("Not found\n"); } + * + */ + +/* Macros may be used to define type-safe(r) ks_hash access functions, with + * methods specialized to take known key and value types as parameters. + * + * Example: + * + * Insert this at the start of your file: + * + * DEFINE_KS_HASH_INSERT(insert_some, struct some_key, struct some_value); + * DEFINE_KS_HASH_SEARCH(search_some, struct some_key, struct some_value); + * DEFINE_KS_HASH_REMOVE(remove_some, struct some_key, struct some_value); + * + * This defines the functions 'insert_some', 'search_some' and 'remove_some'. + * These operate just like ks_hash_insert etc., with the same parameters, + * but their function signatures have 'struct some_key *' rather than + * 'void *', and hence can generate compile time errors if your program is + * supplying incorrect data as a key (and similarly for value). + * + * Note that the hash and key equality functions passed to ks_hash_create + * still take 'void *' parameters instead of 'some key *'. This shouldn't be + * a difficult issue as they're only defined and passed once, and the other + * functions will ensure that only valid keys are supplied to them. + * + * The cost for this checking is increased code size and runtime overhead + * - if performance is important, it may be worth switching back to the + * unsafe methods once your program has been debugged with the safe methods. 
+ * This just requires switching to some simple alternative defines - eg: + * #define insert_some ks_hash_insert + * + */ + + +typedef enum { + KS_HASH_FLAG_NONE = 0, + KS_HASH_FLAG_DEFAULT = (1 << 0), + KS_HASH_FLAG_FREE_KEY = (1 << 1), + KS_HASH_FLAG_FREE_VALUE = (1 << 2), + KS_HASH_FLAG_RWLOCK = (1 << 3), + KS_HASH_FLAG_DUP_CHECK = (1 << 4) +} ks_hash_flag_t; + +#define KS_HASH_FREE_BOTH KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE + +typedef enum { + KS_HASH_MODE_DEFAULT = 0, + KS_HASH_MODE_CASE_SENSITIVE, + KS_HASH_MODE_CASE_INSENSITIVE, + KS_HASH_MODE_INT, + KS_HASH_MODE_INT64, + KS_HASH_MODE_PTR +} ks_hash_mode_t; + + + +/***************************************************************************** + * ks_hash_create + + * @name ks_hash_create + * @param minsize minimum initial size of ks_hash + * @param hashfunction function for hashing keys + * @param key_eq_fn function for determining key equality + * @return newly created ks_hash or NULL on failure + */ + +KS_DECLARE(ks_status_t) +ks_hash_create_ex(ks_hash_t **hp, unsigned int minsize, + unsigned int (*hashfunction) (void*), + int (*key_eq_fn) (void*,void*), ks_hash_mode_t mode, ks_hash_flag_t flags, ks_hash_destructor_t destructor, ks_pool_t *pool); + +/***************************************************************************** + * ks_hash_insert + + * @name ks_hash_insert + * @param h the ks_hash to insert into + * @param k the key - ks_hash claims ownership and will free on removal + * @param v the value - does not claim ownership + * @return non-zero for successful insertion + * + * This function will cause the table to expand if the insertion would take + * the ratio of entries to table size over the maximum load factor. + * + * This function does not check for repeated insertions with a duplicate key. + * The value returned when using a duplicate key is undefined -- when + * the ks_hash changes size, the order of retrieval of duplicate key + * entries is reversed. + * If in doubt, remove before insert. 
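+ *
+ * A brief sketch of the remove-before-insert idiom (illustrative only;
+ * whether the removed key and value must also be freed here depends on the
+ * KS_HASH_FLAG_FREE_* flags the hash was created with):
+ *
+ *	if (ks_hash_search(h, key, KS_UNLOCKED)) {
+ *		ks_hash_remove(h, key);
+ *	}
+ *	ks_hash_insert(h, key, value);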
+ */ + + +KS_DECLARE(int) ks_hash_insert_ex(ks_hash_t *h, void *k, void *v, ks_hash_flag_t flags, ks_hash_destructor_t destructor); +#define ks_hash_insert(_h, _k, _v) ks_hash_insert_ex(_h, _k, _v, 0, NULL) + +#define DEFINE_KS_HASH_INSERT(fnname, keytype, valuetype) \ + int fnname (ks_hash_t *h, keytype *k, valuetype *v) \ + { \ + return ks_hash_insert(h,k,v); \ + } + + +KS_DECLARE(void) ks_hash_set_flags(ks_hash_t *h, ks_hash_flag_t flags); +KS_DECLARE(void) ks_hash_set_destructor(ks_hash_t *h, ks_hash_destructor_t destructor); + +/***************************************************************************** + * ks_hash_search + + * @name ks_hash_search + * @param h the ks_hash to search + * @param k the key to search for - does not claim ownership + * @return the value associated with the key, or NULL if none found + */ + +KS_DECLARE(void *) +ks_hash_search(ks_hash_t *h, void *k, ks_locked_t locked); + +#define DEFINE_KS_HASH_SEARCH(fnname, keytype, valuetype) \ + valuetype * fnname (ks_hash_t *h, keytype *k) \ + { \ + return (valuetype *) (ks_hash_search(h,k)); \ + } + +/***************************************************************************** + * ks_hash_remove + + * @name ks_hash_remove + * @param h the ks_hash to remove the item from + * @param k the key to search for - does not claim ownership + * @return the value associated with the key, or NULL if none found + */ + +KS_DECLARE(void *) /* returns value */ +ks_hash_remove(ks_hash_t *h, void *k); + +#define DEFINE_KS_HASH_REMOVE(fnname, keytype, valuetype) \ + valuetype * fnname (ks_hash_t *h, keytype *k) \ + { \ + return (valuetype *) (ks_hash_remove(h,k)); \ + } + + +/***************************************************************************** + * ks_hash_count + + * @name ks_hash_count + * @param h the ks_hash + * @return the number of items stored in the ks_hash + */ +KS_DECLARE(unsigned int) +ks_hash_count(ks_hash_t *h); + +/***************************************************************************** + * ks_hash_destroy + + * @name ks_hash_destroy + * @param h the ks_hash + * @param free_values whether to call 'free' on the remaining values + */ + +KS_DECLARE(void) +ks_hash_destroy(ks_hash_t **h); + +KS_DECLARE(ks_hash_iterator_t*) ks_hash_first(ks_hash_t *h, ks_locked_t locked); +KS_DECLARE(void) ks_hash_last(ks_hash_iterator_t **iP); +KS_DECLARE(ks_hash_iterator_t*) ks_hash_next(ks_hash_iterator_t **iP); +KS_DECLARE(void) ks_hash_this(ks_hash_iterator_t *i, const void **key, ks_ssize_t *klen, void **val); +KS_DECLARE(void) ks_hash_this_val(ks_hash_iterator_t *i, void *val); +KS_DECLARE(ks_status_t) ks_hash_create(ks_hash_t **hp, ks_hash_mode_t mode, ks_hash_flag_t flags, ks_pool_t *pool); + +KS_DECLARE(void) ks_hash_write_lock(ks_hash_t *h); +KS_DECLARE(void) ks_hash_write_unlock(ks_hash_t *h); +KS_DECLARE(ks_status_t) ks_hash_read_lock(ks_hash_t *h); +KS_DECLARE(ks_status_t) ks_hash_read_unlock(ks_hash_t *h); + + +static __inline uint32_t ks_hash_default_int64(void *ky) +{ + int64_t key = *((int64_t *)ky); + key = (~key) + (key << 18); + key = key ^ (key >> 31); + key = key * 21; + key = key ^ (key >> 11); + key = key + (key << 6); + key = key ^ (key >> 22); + return (uint32_t) key; +} + +static __inline int ks_hash_equalkeys_int64(void *k1, void *k2) +{ + return *(uint64_t *)k1 == *(uint64_t *)k2; +} + +static __inline uint32_t ks_hash_default_int(void *ky) { + uint32_t x = *((uint32_t *)ky); + x = ((x >> 16) ^ x) * 0x45d9f3b; + x = ((x >> 16) ^ x) * 0x45d9f3b; + x = ((x >> 16) ^ x); + return x; +} + +static __inline 
int ks_hash_equalkeys_int(void *k1, void *k2) +{ + return *(uint32_t *)k1 == *(uint32_t *)k2; +} +#if 0 +} +#endif + +static __inline uint32_t ks_hash_default_ptr(void *ky) +{ +#ifdef KS_64BIT + return ks_hash_default_int64(ky); +#endif + return ks_hash_default_int(ky); +} + +static __inline int ks_hash_equalkeys_ptr(void *k1, void *k2) +{ +#ifdef KS_64BIT + return ks_hash_equalkeys_int64(k1, k2); +#endif + return ks_hash_equalkeys_int(k1, k2); +} + + +static __inline int ks_hash_equalkeys(void *k1, void *k2) +{ + return strcmp((char *) k1, (char *) k2) ? 0 : 1; +} + +static __inline int ks_hash_equalkeys_ci(void *k1, void *k2) +{ + return strcasecmp((char *) k1, (char *) k2) ? 0 : 1; +} + +static __inline uint32_t ks_hash_default(void *ky) +{ + unsigned char *str = (unsigned char *) ky; + uint32_t hash = 0; + int c; + + while ((c = *str)) { + str++; + hash = c + (hash << 6) + (hash << 16) - hash; + } + + return hash; +} + +static __inline uint32_t ks_hash_default_ci(void *ky) +{ + unsigned char *str = (unsigned char *) ky; + uint32_t hash = 0; + int c; + + while ((c = ks_tolower(*str))) { + str++; + hash = c + (hash << 6) + (hash << 16) - hash; + } + + return hash; +} + + + + +#ifdef __cplusplus +} /* extern C */ +#endif + +#endif /* __KS_HASH_CWC22_H__ */ + +/* + * Copyright (c) 2002, Christopher Clark + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_json.h b/libs/libks/src/include/ks_json.h old mode 100755 new mode 100644 index 1ad116e980..60cdc64adf --- a/libs/libks/src/include/ks_json.h +++ b/libs/libks/src/include/ks_json.h @@ -76,20 +76,21 @@ KS_DECLARE(int) cJSON_GetArraySize(cJSON *array); /* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. */ KS_DECLARE(cJSON *)cJSON_GetArrayItem(cJSON *array,int item); /* Get item "string" from object. 
Case insensitive. */ -KS_DECLARE(cJSON *)cJSON_GetObjectItem(cJSON *object,const char *string); +KS_DECLARE(cJSON *)cJSON_GetObjectItem(const cJSON *object,const char *string); +KS_DECLARE(const char *)cJSON_GetObjectCstr(const cJSON *object, const char *string); /* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ -KS_DECLARE(const char *)cJSON_GetErrorPtr(); +KS_DECLARE(const char *)cJSON_GetErrorPtr(void); /* These calls create a cJSON item of the appropriate type. */ -KS_DECLARE(cJSON *)cJSON_CreateNull(); -KS_DECLARE(cJSON *)cJSON_CreateTrue(); -KS_DECLARE(cJSON *)cJSON_CreateFalse(); +KS_DECLARE(cJSON *)cJSON_CreateNull(void); +KS_DECLARE(cJSON *)cJSON_CreateTrue(void); +KS_DECLARE(cJSON *)cJSON_CreateFalse(void); KS_DECLARE(cJSON *)cJSON_CreateBool(int b); KS_DECLARE(cJSON *)cJSON_CreateNumber(double num); KS_DECLARE(cJSON *)cJSON_CreateString(const char *string); -KS_DECLARE(cJSON *)cJSON_CreateArray(); -KS_DECLARE(cJSON *)cJSON_CreateObject(); +KS_DECLARE(cJSON *)cJSON_CreateArray(void); +KS_DECLARE(cJSON *)cJSON_CreateObject(void); /* These utilities create an Array of count items. */ KS_DECLARE(cJSON *)cJSON_CreateIntArray(int *numbers,int count); @@ -114,12 +115,68 @@ KS_DECLARE(void) cJSON_DeleteItemFromObject(cJSON *object,const char *string); KS_DECLARE(void) cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem); KS_DECLARE(void) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); +/* Duplicate a cJSON item */ +KS_DECLARE(cJSON *) cJSON_Duplicate(cJSON *item,int recurse); +/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will + need to be released. With recurse!=0, it will duplicate any children connected to the item. + The item->next and ->prev pointers are always zero on return from Duplicate. 
*/ + + #define cJSON_AddNullToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateNull()) #define cJSON_AddTrueToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateTrue()) #define cJSON_AddFalseToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateFalse()) #define cJSON_AddNumberToObject(object,name,n) cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n)) #define cJSON_AddStringToObject(object,name,s) cJSON_AddItemToObject(object, name, cJSON_CreateString(s)) +KS_DECLARE(cJSON *) cJSON_CreateStringPrintf(const char *fmt, ...); + +static inline cJSON *json_add_child_obj(cJSON *json, const char *name, cJSON *obj) +{ + cJSON *new_json = NULL; + + ks_assert(json); + + if (obj) { + new_json = obj; + } else { + new_json = cJSON_CreateObject(); + } + + ks_assert(new_json); + + cJSON_AddItemToObject(json, name, new_json); + + return new_json; +} + +static inline cJSON *json_add_child_array(cJSON *json, const char *name) +{ + cJSON *new_json = NULL; + + ks_assert(json); + + new_json = cJSON_CreateArray(); + ks_assert(new_json); + + cJSON_AddItemToObject(json, name, new_json); + + return new_json; +} + +static inline cJSON *json_add_child_string(cJSON *json, const char *name, const char *val) +{ + cJSON *new_json = NULL; + + ks_assert(json); + + new_json = cJSON_CreateString(val); + ks_assert(new_json); + + cJSON_AddItemToObject(json, name, new_json); + + return new_json; +} + #ifdef __cplusplus } #endif diff --git a/libs/libks/src/include/ks_platform.h b/libs/libks/src/include/ks_platform.h index a310e474d0..f3028b9de2 100644 --- a/libs/libks/src/include/ks_platform.h +++ b/libs/libks/src/include/ks_platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2014, Anthony Minessale II + * Copyright (c) 2007-2015, Anthony Minessale II * All rights reserved. 
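The json_add_child_* helpers above are easiest to see in a small end-to-end construction. Below is a minimal sketch (editor-added, not part of the ks_json.h diff); it assumes cJSON_Print(), cJSON_Delete() and cJSON_AddItemToArray() are declared in the unchanged portion of the header, as in stock cJSON.

/* Editor sketch: builds {"event":"call.start","params":{"caller":"1000"},"tags":["inbound"]} */
#include "ks.h"

static char *build_event_json(void)
{
	cJSON *msg = cJSON_CreateObject();
	cJSON *params = json_add_child_obj(msg, "params", NULL);   /* NULL -> helper creates the child object */
	cJSON *tags = json_add_child_array(msg, "tags");
	char *text;

	cJSON_AddStringToObject(msg, "event", "call.start");
	json_add_child_string(params, "caller", "1000");
	cJSON_AddItemToArray(tags, cJSON_CreateString("inbound"));

	text = cJSON_Print(msg);   /* serialized copy owned by the caller */
	cJSON_Delete(msg);         /* frees the whole tree, children included */
	return text;
}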
* * Redistribution and use in source and binary forms, with or without @@ -34,33 +34,94 @@ #ifndef _KS_PLATFORM_H_ #define _KS_PLATFORM_H_ -#include +KS_BEGIN_EXTERN_C -#ifdef __cplusplus -extern "C" { -#endif /* defined(__cplusplus) */ - -#if !defined(_XOPEN_SOURCE) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) +#if !defined(_XOPEN_SOURCE) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) && !defined(__APPLE__) #define _XOPEN_SOURCE 600 #endif -#ifndef HAVE_STRINGS_H -#define HAVE_STRINGS_H 1 -#endif -#ifndef HAVE_SYS_SOCKET_H -#define HAVE_SYS_SOCKET_H 1 +#if defined(__linux__) && !defined(_DEFAULT_SOURCE) +#define _DEFAULT_SOURCE 1 #endif -#ifndef __WINDOWS__ -#if defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32) +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) #define __WINDOWS__ #endif + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE #endif +#if UINTPTR_MAX == 0xffffffffffffffff +#define KS_64BIT 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __WINDOWS__ +#include +#include +#include +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + #ifdef _MSC_VER +#pragma comment(lib, "Ws2_32.lib") + +#include +/*#include +#include +#include +*/ +#ifndef open +#define open _open +#endif + +#ifndef close +#define close _close +#endif + +#ifndef read +#define read _read +#endif + +#ifndef write +#define write _write +#endif + #ifndef __inline__ #define __inline__ __inline #endif + +#ifndef strdup +#define strdup _strdup +#endif + #if (_MSC_VER >= 1400) /* VC8+ */ #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE @@ -69,67 +130,62 @@ extern "C" { #define _CRT_NONSTDC_NO_DEPRECATE #endif #endif + #ifndef strcasecmp #define strcasecmp(s1, s2) _stricmp(s1, s2) #endif + #ifndef strncasecmp #define strncasecmp(s1, s2, n) _strnicmp(s1, s2, n) #endif + #if (_MSC_VER < 1900) /* VC 2015 */ #ifndef snprintf #define snprintf _snprintf #endif #endif + #ifndef S_IRUSR #define S_IRUSR _S_IREAD #endif + #ifndef S_IWUSR #define S_IWUSR _S_IWRITE #endif -#undef HAVE_STRINGS_H -#undef HAVE_SYS_SOCKET_H + +#endif /* _MSC_VER */ + +#if (_MSC_VER >= 1400) // VC8+ +#define ks_assert(expr) assert(expr);__analysis_assume( expr ) #endif -#include -#ifndef WIN32 -#include +#ifndef ks_assert +#define ks_assert(_x) assert(_x) #endif -#include -#include -#include -#include -#ifndef WIN32 -#include -#include -#include -#include -#include -#include -#endif +#ifdef __WINDOWS__ + typedef SOCKET ks_socket_t; + typedef unsigned __int64 uint64_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int8 uint8_t; + typedef __int64 int64_t; + typedef __int32 int32_t; + typedef __int16 int16_t; + typedef __int8 int8_t; + typedef intptr_t ks_ssize_t; + typedef int ks_filehandle_t; -#ifdef HAVE_STRINGS_H -#include -#endif -#include - - -#ifdef WIN32 -#include -#include -typedef SOCKET ks_socket_t; -typedef unsigned __int64 uint64_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int8 uint8_t; -typedef __int64 int64_t; -typedef __int32 int32_t; -typedef __int16 int16_t; -typedef __int8 int8_t; -typedef intptr_t ks_ssize_t; -typedef int ks_filehandle_t; #define KS_SOCK_INVALID INVALID_SOCKET #define strerror_r(num, buf, 
size) strerror_s(buf, size, num) +#else +#define KS_SOCK_INVALID -1 + typedef int ks_socket_t; + typedef ssize_t ks_ssize_t; + typedef int ks_filehandle_t; +#endif + +#ifdef __WINDOWS__ #if defined(KS_DECLARE_STATIC) #define KS_DECLARE(type) type __stdcall #define KS_DECLARE_NONSTD(type) type __cdecl @@ -143,30 +199,68 @@ typedef int ks_filehandle_t; #define KS_DECLARE_NONSTD(type) __declspec(dllimport) type __cdecl #define KS_DECLARE_DATA __declspec(dllimport) #endif -#else // !WIN32 +#else // !WIN32 +#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(KS_API_VISIBILITY) +#define KS_DECLARE(type) __attribute__((visibility("default"))) type +#define KS_DECLARE_NONSTD(type) __attribute__((visibility("default"))) type +#define KS_DECLARE_DATA __attribute__((visibility("default"))) +#else #define KS_DECLARE(type) type #define KS_DECLARE_NONSTD(type) type #define KS_DECLARE_DATA -#include -#include -#include -#include -#include -#include -#include -#define KS_SOCK_INVALID -1 -typedef int ks_socket_t; -typedef ssize_t ks_ssize_t; -typedef int ks_filehandle_t; +#endif #endif +/* malloc or DIE macros */ +#ifdef NDEBUG +#define ks_malloc(ptr, len) (void)( (!!(ptr = malloc(len))) || (fprintf(stderr,"ABORT! Malloc failure at: %s:%d", __FILE__, __LINE__),abort(), 0), ptr ) +#define ks_zmalloc(ptr, len) (void)( (!!(ptr = calloc(1, (len)))) || (fprintf(stderr,"ABORT! Malloc failure at: %s:%d", __FILE__, __LINE__),abort(), 0), ptr) +#if (_MSC_VER >= 1500) // VC9+ +#define ks_strdup(ptr, s) (void)( (!!(ptr = _strdup(s))) || (fprintf(stderr,"ABORT! Malloc failure at: %s:%d", __FILE__, __LINE__),abort(), 0), ptr) +#else +#define ks_strdup(ptr, s) (void)( (!!(ptr = strdup(s))) || (fprintf(stderr,"ABORT! Malloc failure at: %s:%d", __FILE__, __LINE__),abort(), 0), ptr) +#endif +#else +#if (_MSC_VER >= 1500) // VC9+ +#define ks_malloc(ptr, len) (void)(assert(((ptr) = malloc((len)))),ptr);__analysis_assume( ptr ) +#define ks_zmalloc(ptr, len) (void)(assert((ptr = calloc(1, (len)))),ptr);__analysis_assume( ptr ) +#define ks_strdup(ptr, s) (void)(assert(((ptr) = _strdup(s))),ptr);__analysis_assume( ptr ) +#else +#define ks_malloc(ptr, len) (void)(assert(((ptr) = malloc((len)))),ptr) +#define ks_zmalloc(ptr, len) (void)(assert((ptr = calloc(1, (len)))),ptr) +#define ks_strdup(ptr, s) (void)(assert(((ptr) = strdup((s)))),ptr) +#endif +#endif +#ifndef __ATTR_SAL + /* used for msvc code analysis */ + /* http://msdn2.microsoft.com/en-us/library/ms235402.aspx */ +#define _In_ +#define _In_z_ +#define _In_opt_z_ +#define _In_opt_ +#define _Printf_format_string_ +#define _Ret_opt_z_ +#define _Ret_z_ +#define _Out_opt_ +#define _Out_ +#define _Check_return_ +#define _Inout_ +#define _Inout_opt_ +#define _In_bytecount_(x) +#define _Out_opt_bytecapcount_(x) +#define _Out_bytecapcount_(x) +#define _Ret_ +#define _Post_z_ +#define _Out_cap_(x) +#define _Out_z_cap_(x) +#define _Out_ptrdiff_cap_(x) +#define _Out_opt_ptrdiff_cap_(x) +#define _Post_count_(x) +#endif -#ifdef __cplusplus -} -#endif /* defined(__cplusplus) */ -#endif /* defined(_KS_PLATFORM_H_) */ - +KS_END_EXTERN_C +#endif /* defined(_KS_PLATFORM_H_) */ /* For Emacs: * Local Variables: * mode:c diff --git a/libs/libks/src/include/ks_pool.h b/libs/libks/src/include/ks_pool.h new file mode 100644 index 0000000000..efa2677e51 --- /dev/null +++ b/libs/libks/src/include/ks_pool.h @@ -0,0 +1,478 @@ +/* + * Memory pool defines. + * + * Copyright 1996 by Gray Watson. + * + * This file is part of the ks_mpool package. 
+ * + * Permission to use, copy, modify, and distribute this software for + * any purpose and without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies, and that the name of Gray Watson not be used in advertising + * or publicity pertaining to distribution of the document or software + * without specific, written prior permission. + * + * Gray Watson makes no representations about the suitability of the + * software described herein for any purpose. It is provided "as is" + * without express or implied warranty. + * + * The author may be reached via http://256.com/gray/ + * + * $Id: ks_mpool.h,v 1.4 2006/05/31 20:26:11 gray Exp $ + */ + +#ifndef __KS_POOL_H__ +#define __KS_POOL_H__ + +#include "ks.h" + +KS_BEGIN_EXTERN_C + +/* + * ks_pool flags to ks_pool_alloc or ks_pool_set_attr + */ + +typedef enum { + KS_POOL_FLAG_DEFAULT = 0, + + KS_POOL_FLAG_BEST_FIT = (1 << 0), +/* + * Choose a best fit algorithm not first fit. This takes more CPU + * time but will result in a tighter heap. + */ + + KS_POOL_FLAG_HEAVY_PACKING = (1 << 1) +/* + * This enables very heavy packing at the possible expense of CPU. + * This affects a number of parts of the library. + * + * By default the 1st page of memory is reserved for the main ks_pool + * structure. This flag will cause the rest of the 1st block to be + * available for use as user memory. + * + * By default the library looks through the memory when freed looking + * for a magic value. There is an internal max size that it will look + * and then it will give up. This flag forces it to look until it + * finds it. + */ +} ks_pool_flag_t; + +/* + * Ks_Pool function IDs for the ks_pool_log_func callback function. + */ +#define KS_POOL_FUNC_CLOSE 1 /* ks_pool_close function called */ +#define KS_POOL_FUNC_CLEAR 2 /* ks_pool_clear function called */ +#define KS_POOL_FUNC_ALLOC 3 /* ks_pool_alloc function called */ +#define KS_POOL_FUNC_CALLOC 4 /* ks_pool_calloc function called */ +#define KS_POOL_FUNC_FREE 5 /* ks_pool_free function called */ +#define KS_POOL_FUNC_RESIZE 6 /* ks_pool_resize function called */ + +/* + * void ks_pool_log_func_t + * + * DESCRIPTION: + * + * Ks_Pool transaction log function. + * + * RETURNS: + * + * None. + * + * ARGUMENT: + * + * mp_p -> Associated ks_pool address. + * + * func_id -> Integer function ID which identifies which ks_pool + * function is being called. + * + * byte_size -> Optionally specified byte size. + * + * ele_n -> Optionally specified element number. For ks_pool_calloc + * only. + * + * new_addr -> Optionally specified new address. For ks_pool_alloc, + * ks_pool_calloc, and ks_pool_resize only. + * + * old_addr -> Optionally specified old address. For ks_pool_resize and + * ks_pool_free only. + * + * old_byte_size -> Optionally specified old byte size. For + * ks_pool_resize only. + */ +typedef void (*ks_pool_log_func_t) (const void *mp_p, + const int func_id, + const unsigned long byte_size, + const unsigned long ele_n, const void *old_addr, const void *new_addr, const unsigned long old_byte_size); + +/* + * ks_pool_t *ks_pool_open + * + * DESCRIPTION: + * + * Open/allocate a new memory pool. 
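The ks_pool_log_func_t contract described above maps onto a plain C function. A hedged sketch follows (editor-added, not part of ks_pool.h); it assumes only the callback signature above and ks_pool_set_log_func() declared further down in this header.

#include <stdio.h>
#include "ks.h"

/* Editor sketch: matches ks_pool_log_func_t; func_id is one of the KS_POOL_FUNC_* values above. */
static void my_pool_logger(const void *mp_p, const int func_id,
                           const unsigned long byte_size, const unsigned long ele_n,
                           const void *old_addr, const void *new_addr,
                           const unsigned long old_byte_size)
{
	fprintf(stderr, "pool %p func=%d size=%lu n=%lu old=%p new=%p old_size=%lu\n",
	        mp_p, func_id, byte_size, ele_n, old_addr, new_addr, old_byte_size);
}

/* Installed with: ks_pool_set_log_func(pool, my_pool_logger); */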
+ * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * poolP <- pointer to new pool that will be set on success + * + */ + +KS_DECLARE(ks_status_t) ks_pool_open(ks_pool_t **poolP); + +/* + * ks_status_t ks_pool_close + * + * DESCRIPTION: + * + * Close/free a memory allocation pool previously opened with + * ks_pool_open. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_pp <-> Pointer to pointer of our memory pool. + */ + +KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **mp_pP); + +/* + * int ks_pool_clear + * + * DESCRIPTION: + * + * Wipe an opened memory pool clean so we can start again. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to our memory pool. + */ + +KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *mp_p); + +/* + * void *ks_pool_alloc + * + * DESCRIPTION: + * + * Allocate space for bytes inside of an already open memory pool. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal malloc. + * + * byte_size -> Number of bytes to allocate in the pool. Must be >0. + * + */ +KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *mp_p, const unsigned long byte_size); + +/* + * void *ks_pool_alloc_ex + * + * DESCRIPTION: + * + * Allocate space for bytes inside of an already open memory pool. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal malloc. + * + * byte_size -> Number of bytes to allocate in the pool. Must be >0. + * + * error_p <- Pointer to integer which, if not NULL, will be set with + * a ks_pool error code. + */ +KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p); + +/* + * void *ks_pool_calloc + * + * DESCRIPTION: + * + * Allocate space for elements of bytes in the memory pool and zero + * the space afterwards. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal calloc. + * + * ele_n -> Number of elements to allocate. + * + * ele_size -> Number of bytes per element being allocated. + * + */ +KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size); + +/* + * void *ks_pool_calloc_ex + * + * DESCRIPTION: + * + * Allocate space for elements of bytes in the memory pool and zero + * the space afterwards. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal calloc. + * + * ele_n -> Number of elements to allocate. + * + * ele_size -> Number of bytes per element being allocated. + * + * error_p <- Pointer to integer which, if not NULL, will be set with + * a ks_pool error code. + */ +KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size, ks_status_t *error_p); + +/* + * int ks_pool_free + * + * DESCRIPTION: + * + * Free an address from a memory pool. 
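Putting the open/alloc/free/close calls documented in this header together, a typical lifecycle looks roughly like the sketch below (editor-added, error handling trimmed; ks_pool_free() and ks_pool_close() are declared in this same header).

#include "ks.h"

static void pool_example(void)
{
	ks_pool_t *pool = NULL;
	char *buf;

	if (ks_pool_open(&pool) != KS_STATUS_SUCCESS)
		return;

	buf = (char *)ks_pool_alloc(pool, 128);   /* 128 bytes carved out of the pool */
	ks_pool_free(pool, buf);                  /* hand the block back to the pool */

	ks_pool_close(&pool);                     /* tears down the pool and any remaining allocations */
}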
+ * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal free. + * + * addr <-> Address to free. + * + */ + +KS_DECLARE(ks_status_t) ks_pool_free(ks_pool_t *mp_p, void *addr); + +/* + * void *ks_pool_resize + * + * DESCRIPTION: + * + * Reallocate an address in a memory pool to a new size. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal realloc. + * + * old_addr -> Previously allocated address. + * + * new_byte_size -> New size of the allocation. + * + */ +KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size); + +/* + * void *ks_pool_resize_ex + * + * DESCRIPTION: + * + * Reallocate an address in a memory pool to a new size. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal realloc. + * + * old_addr -> Previously allocated address. + * + * new_byte_size -> New size of the allocation. + * + * error_p <- Pointer to integer which, if not NULL, will be set with + * a ks_pool error code. + */ +KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size, ks_status_t *error_p); + +/* + * int ks_pool_stats + * + * DESCRIPTION: + * + * Return stats from the memory pool. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p -> Pointer to the memory pool. + * + * page_size_p <- Pointer to an unsigned integer which, if not NULL, + * will be set to the page-size of the pool. + * + * num_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the number of pointers currently allocated in pool. + * + * user_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the number of user bytes allocated in this pool. + * + * max_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the maximum number of user bytes that have been + * allocated in this pool. + * + * tot_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the total amount of space (including administrative + * overhead) used by the pool. + */ +KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_size_p, + unsigned long *num_alloced_p, unsigned long *user_alloced_p, unsigned long *max_alloced_p, unsigned long *tot_alloced_p); + +/* + * int ks_pool_set_log_func + * + * DESCRIPTION: + * + * Set a logging callback function to be called whenever there is a + * memory transaction. See ks_pool_log_func_t. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * log_func -> Log function (defined in ks_pool.h) which will be called + * with each ks_pool transaction. + */ +KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *mp_p, ks_pool_log_func_t log_func); + +/* + * int ks_pool_set_max_pages + * + * DESCRIPTION: + * + * Set the maximum number of pages that the library will use. Once it + * hits the limit it will return KS_STATUS_NO_PAGES.
+ * + * NOTE: if the KS_POOL_FLAG_HEAVY_PACKING is set then this max-pages + * value will include the page with the ks_pool header structure in it. + * If the flag is _not_ set then the max-pages will not include this + * first page. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * max_pages -> Maximum number of pages used by the library. + */ +KS_DECLARE(ks_status_t) ks_pool_set_max_pages(ks_pool_t *mp_p, const unsigned int max_pages); + +/* + * const char *ks_pool_strerror + * + * DESCRIPTION: + * + * Return the corresponding string for the error number. + * + * RETURNS: + * + * Success - String equivalent of the error. + * + * Failure - String "invalid error code" + * + * ARGUMENTS: + * + * error -> Error number that we are converting. + */ +KS_DECLARE(const char *) ks_pool_strerror(const ks_status_t error); + +KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *mp_p, void *ptr, void *arg, int type, ks_pool_cleanup_fn_t fn); + +#define ks_pool_safe_free(_p, _a) ks_pool_free(_p, _a); (_a) = NULL + +/*<<<<<<<<<< This is end of the auto-generated output from fillproto. */ + +KS_DECLARE(char *) ks_pstrdup(ks_pool_t *pool, const char *str); +KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, size_t len); +KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, size_t len); +KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, size_t len); +KS_DECLARE(char *) ks_pstrcat(ks_pool_t *pool, ...); + +KS_END_EXTERN_C + +#endif /* ! __KS_POOL_H__ */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_printf.h b/libs/libks/src/include/ks_printf.h new file mode 100644 index 0000000000..8e9f975246 --- /dev/null +++ b/libs/libks/src/include/ks_printf.h @@ -0,0 +1,81 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +*/ +#ifndef KS_PRINTF_H +#define KS_PRINTF_H + +#include "ks.h" + +KS_BEGIN_EXTERN_C + +/** + * This routine is a variant of the "sprintf()" from the + * standard C library. The resulting string is written into memory + * obtained from malloc() so that there is never a possibility of buffer + * overflow. This routine also implements some additional formatting + * options that are useful for constructing SQL statements. + * + * The strings returned by this routine should be freed by calling + * free(). + * + * All of the usual printf formatting options apply. In addition, there + * is a "%q" option. %q works like %s in that it substitutes a null-terminated + * string from the argument list. But %q also doubles every '\'' character. + * %q is designed for use inside a string literal. By doubling each '\'' + * character it escapes that character and allows it to be inserted into + * the string.
+ * + * For example, so some string variable contains text as follows: + * + * char *zText = "It's a happy day!"; + * + * We can use this text in an SQL statement as follows: + * + * char *z = ks_mprintf("INSERT INTO TABLES('%q')", zText); + * ks_core_db_exec(db, z, callback1, 0, 0); + * free(z); + * + * Because the %q format string is used, the '\'' character in zText + * is escaped and the SQL generated is as follows: + * + * INSERT INTO table1 VALUES('It''s a happy day!') + * + * This is correct. Had we used %s instead of %q, the generated SQL + * would have looked like this: + * + * INSERT INTO table1 VALUES('It's a happy day!'); + * + * This second example is an SQL syntax error. As a general rule you + * should always use %q instead of %s when inserting text into a string + * literal. + */ +KS_DECLARE(char *) ks_mprintf(const char *zFormat, ...); +KS_DECLARE(char *) ks_vmprintf(const char *zFormat, va_list ap); +KS_DECLARE(char *) ks_snprintfv(char *zBuf, int n, const char *zFormat, ...); +KS_DECLARE(char *) ks_vsnprintf(char *zbuf, int n, const char *zFormat, va_list ap); +KS_DECLARE(char *) ks_vpprintf(ks_pool_t *pool, const char *zFormat, va_list ap); +KS_DECLARE(char *) ks_pprintf(ks_pool_t *pool, const char *zFormat, ...); + +KS_END_EXTERN_C + +#endif /* KS_PRINTF_H */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_q.h b/libs/libks/src/include/ks_q.h new file mode 100644 index 0000000000..bbe97f8af4 --- /dev/null +++ b/libs/libks/src/include/ks_q.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2007-2015, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
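Beyond the SQL case documented above, the same calls cover everyday string building. The sketch below is editor-added and shows the malloc-backed and pool-backed variants side by side; releasing the ks_pprintf() result with ks_pool_free() is an assumption that follows from it being pool allocated.

#include <stdlib.h>
#include "ks.h"

/* Editor sketch: %q escaping plus the pool-backed formatter. */
static void printf_example(ks_pool_t *pool)
{
	char *sql, *msg;

	sql = ks_mprintf("INSERT INTO table1 VALUES('%q')", "It's a happy day!");
	/* sql now holds: INSERT INTO table1 VALUES('It''s a happy day!') */
	free(sql);                       /* malloc-backed: caller frees with free() */

	msg = ks_pprintf(pool, "caller=%s attempt=%d", "1000", 1);
	ks_pool_free(pool, msg);         /* pool-backed: released via the pool */
}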
+ */ + +#ifndef _KS_Q_H_ +#define _KS_Q_H_ + +#include "ks.h" + +KS_BEGIN_EXTERN_C + +KS_DECLARE(ks_status_t) ks_q_pop_timeout(ks_q_t *q, void **ptr, uint32_t timeout); +KS_DECLARE(ks_status_t) ks_q_wake(ks_q_t *q); +KS_DECLARE(ks_status_t) ks_q_flush(ks_q_t *q); +KS_DECLARE(ks_status_t) ks_q_set_flush_fn(ks_q_t *q, ks_flush_fn_t fn, void *flush_data); +KS_DECLARE(ks_status_t) ks_q_wait(ks_q_t *q); +KS_DECLARE(ks_size_t) ks_q_term(ks_q_t *q); +KS_DECLARE(ks_size_t) ks_q_size(ks_q_t *q); +KS_DECLARE(ks_status_t) ks_q_destroy(ks_q_t **qP); +KS_DECLARE(ks_status_t) ks_q_create(ks_q_t **qP, ks_pool_t *pool, ks_size_t maxlen); +KS_DECLARE(ks_status_t) ks_q_push(ks_q_t *q, void *ptr); +KS_DECLARE(ks_status_t) ks_q_trypush(ks_q_t *q, void *ptr); +KS_DECLARE(ks_status_t) ks_q_pop(ks_q_t *q, void **ptr); +KS_DECLARE(ks_status_t) ks_q_trypop(ks_q_t *q, void **ptr); + +KS_END_EXTERN_C + +#endif /* defined(_KS_Q_H_) */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_rng.h b/libs/libks/src/include/ks_rng.h new file mode 100644 index 0000000000..4048995ec7 --- /dev/null +++ b/libs/libks/src/include/ks_rng.h @@ -0,0 +1,56 @@ +/* + * Cross Platform random/uuid abstraction + * Copyright(C) 2015 Michael Jerris + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so. + * + * This work is provided under this license on an "as is" basis, without warranty of any kind, + * either expressed or implied, including, without limitation, warranties that the covered code + * is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire + * risk as to the quality and performance of the covered code is with you. Should any covered + * code prove defective in any respect, you (not the initial developer or any other contributor) + * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty + * constitutes an essential part of this license. No use of any covered code is authorized hereunder + * except under this disclaimer. + * + */ + +#include "ks.h" + +#ifndef _KS_RNG_H +#define _KS_RNG_H + +KS_BEGIN_EXTERN_C + +#ifdef WIN32 +#include +typedef UUID uuid_t; +#else +#include +#endif + +KS_DECLARE(uuid_t *) ks_uuid(uuid_t *uuid); +KS_DECLARE(char *) ks_uuid_str(ks_pool_t *pool, uuid_t *uuid); +KS_DECLARE(ks_status_t) ks_rng_init(void); +KS_DECLARE(ks_status_t) ks_rng_shutdown(void); +KS_DECLARE(size_t) ks_rng_get_data(uint8_t* buffer, size_t length); +KS_DECLARE(size_t) ks_rng_add_entropy(const uint8_t *buffer, size_t length); +KS_DECLARE(size_t) ks_rng_seed_data(uint8_t *seed, size_t length); + +KS_END_EXTERN_C + +#endif + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ + diff --git a/libs/libks/src/include/ks_socket.h b/libs/libks/src/include/ks_socket.h new file mode 100644 index 0000000000..5d6511e337 --- /dev/null +++ b/libs/libks/src/include/ks_socket.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. 
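The queue and RNG declarations above combine naturally. A sketch follows (editor-added), assuming the ks_q_pop_timeout() timeout is expressed in milliseconds and that queued items are plain pointers owned by the caller.

#include <stdio.h>
#include "ks.h"

/* Editor sketch: bounded queue plus a pool-backed uuid string. */
static void queue_example(ks_pool_t *pool)
{
	ks_q_t *q = NULL;
	void *item = NULL;
	uuid_t id;
	static char payload[] = "hello";

	if (ks_q_create(&q, pool, 32) != KS_STATUS_SUCCESS)   /* bounded to 32 entries */
		return;

	ks_q_push(q, payload);
	if (ks_q_pop_timeout(q, &item, 1000) == KS_STATUS_SUCCESS)
		printf("popped: %s\n", (char *)item);

	printf("node id: %s\n", ks_uuid_str(pool, ks_uuid(&id)));
	ks_q_destroy(&q);
}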
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _KS_SOCKET_H_ +#define _KS_SOCKET_H_ + +#include "ks.h" + +#ifndef WIN32 +#include +#endif + +KS_BEGIN_EXTERN_C + +#define KS_SO_NONBLOCK 2999 + +#ifdef WIN32 + +static __inline int ks_errno(void) +{ + return WSAGetLastError(); +} + +static __inline int ks_errno_is_blocking(int errcode) +{ + return errcode == WSAEWOULDBLOCK || errcode == WSAEINPROGRESS || errcode == 35 || errcode == 730035; +} + +static __inline int ks_errno_is_interupt(int errcode) +{ + return 0; +} + +#else + +static inline int ks_errno(void) +{ + return errno; +} + +static inline int ks_errno_is_blocking(int errcode) +{ + return errcode == EAGAIN || errcode == EWOULDBLOCK || errcode == EINPROGRESS || errcode == EINTR || errcode == ETIMEDOUT || errcode == 35 || errcode == 730035; +} + +static inline int ks_errno_is_interupt(int errcode) +{ + return errcode == EINTR; +} + +#endif + +static __inline int ks_socket_valid(ks_socket_t s) { + return s != KS_SOCK_INVALID; +} + +#define KS_SA_INIT {AF_INET}; + +KS_DECLARE(ks_status_t) ks_socket_send(ks_socket_t sock, void *data, ks_size_t *datalen); +KS_DECLARE(ks_status_t) ks_socket_recv(ks_socket_t sock, void *data, ks_size_t *datalen); +KS_DECLARE(ks_status_t) ks_socket_sendto(ks_socket_t sock, void *data, ks_size_t *datalen, ks_sockaddr_t *addr); +KS_DECLARE(ks_status_t) ks_socket_recvfrom(ks_socket_t sock, void *data, ks_size_t *datalen, ks_sockaddr_t *addr); + +typedef struct pollfd *ks_ppollfd_t; +KS_DECLARE(int) ks_poll(ks_ppollfd_t fds, uint32_t nfds, int timeout); +KS_DECLARE(ks_status_t) ks_socket_option(ks_socket_t socket, int option_name, ks_bool_t enabled); +KS_DECLARE(ks_status_t) ks_socket_sndbuf(ks_socket_t socket, int bufsize); +KS_DECLARE(ks_status_t) ks_socket_rcvbuf(ks_socket_t socket, int bufsize); +KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags); + +KS_DECLARE(ks_socket_t) ks_socket_connect(int type, int protocol, ks_sockaddr_t *addr); +KS_DECLARE(ks_status_t) ks_addr_bind(ks_socket_t server_sock, ks_sockaddr_t 
*addr); +KS_DECLARE(const char *) ks_addr_get_host(ks_sockaddr_t *addr); +KS_DECLARE(ks_port_t) ks_addr_get_port(ks_sockaddr_t *addr); +KS_DECLARE(int) ks_addr_cmp(const ks_sockaddr_t *sa1, const ks_sockaddr_t *sa2); +KS_DECLARE(ks_status_t) ks_addr_copy(ks_sockaddr_t *addr, const ks_sockaddr_t *src_addr); +KS_DECLARE(ks_status_t) ks_addr_set(ks_sockaddr_t *addr, const char *host, ks_port_t port, int family); +KS_DECLARE(ks_status_t) ks_addr_set_raw(ks_sockaddr_t *addr, void *data, ks_port_t port, int family); +KS_DECLARE(ks_status_t) ks_addr_raw_data(const ks_sockaddr_t *addr, void **data, ks_size_t *datalen); +KS_DECLARE(ks_status_t) ks_listen(const char *host, ks_port_t port, int family, int backlog, ks_listen_callback_t callback, void *user_data); +KS_DECLARE(ks_status_t) ks_socket_shutdown(ks_socket_t sock, int how); +KS_DECLARE(ks_status_t) ks_socket_close(ks_socket_t *sock); +KS_DECLARE(ks_status_t) ks_ip_route(char *buf, int len, const char *route_ip); +KS_DECLARE(ks_status_t) ks_find_local_ip(char *buf, int len, int *mask, int family, const char *route_ip); +KS_DECLARE(ks_status_t) ks_listen_sock(ks_socket_t server_sock, ks_sockaddr_t *addr, int backlog, ks_listen_callback_t callback, void *user_data); +KS_END_EXTERN_C + +#endif /* defined(_KS_SOCKET_H_) */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_ssl.h b/libs/libks/src/include/ks_ssl.h new file mode 100644 index 0000000000..b899c64cc3 --- /dev/null +++ b/libs/libks/src/include/ks_ssl.h @@ -0,0 +1,28 @@ +#ifndef _KS_SSL_H +#define _KS_SSL_H + +#include "ks.h" + +#include +#include + +KS_BEGIN_EXTERN_C + +KS_DECLARE(void) ks_ssl_init_ssl_locks(void); +KS_DECLARE(void) ks_ssl_destroy_ssl_locks(void); +KS_DECLARE(int) ks_gen_cert(const char *dir, const char *file); + +KS_END_EXTERN_C + +#endif /* defined(_KS_SSL_H) */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_threadmutex.h b/libs/libks/src/include/ks_threadmutex.h index fb3eadab21..ceac9e45e8 100644 --- a/libs/libks/src/include/ks_threadmutex.h +++ b/libs/libks/src/include/ks_threadmutex.h @@ -1,6 +1,6 @@ -/* +/* * Cross Platform Thread/Mutex abstraction - * Copyright(C) 2007 Michael Jerris + * Copyright(C) 2015 Michael Jerris * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is @@ -13,38 +13,122 @@ * code prove defective in any respect, you (not the initial developer or any other contributor) * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty * constitutes an essential part of this license. No use of any covered code is authorized hereunder - * except under this disclaimer. + * except under this disclaimer. 
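A short connect-and-send sketch (editor-added) ties the ks_addr_* and ks_socket_* calls above together. The loopback address, port and the SOCK_STREAM/IPPROTO_TCP pair are illustrative choices, and datalen being updated in place with the byte count actually sent is an assumption.

#include "ks.h"

/* Editor sketch: resolve, connect, send, close. */
static ks_status_t connect_and_send(void *payload, ks_size_t len)
{
	ks_sockaddr_t addr;
	ks_socket_t sock;
	ks_size_t bytes = len;

	if (ks_addr_set(&addr, "127.0.0.1", 8080, AF_INET) != KS_STATUS_SUCCESS)
		return KS_STATUS_FAIL;

	sock = ks_socket_connect(SOCK_STREAM, IPPROTO_TCP, &addr);
	if (!ks_socket_valid(sock))
		return KS_STATUS_FAIL;

	ks_socket_option(sock, KS_SO_NONBLOCK, KS_TRUE);   /* libks-level option from this header */
	ks_socket_send(sock, payload, &bytes);

	ks_socket_close(&sock);
	return KS_STATUS_SUCCESS;
}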
* */ - #ifndef _KS_THREADMUTEX_H #define _KS_THREADMUTEX_H #include "ks.h" -#ifdef __cplusplus -extern "C" { -#endif /* defined(__cplusplus) */ +KS_BEGIN_EXTERN_C -typedef struct ks_mutex ks_mutex_t; -typedef struct ks_thread ks_thread_t; -typedef void *(*ks_thread_function_t) (ks_thread_t *, void *); +#ifdef WIN32 +#include +#define KS_THREAD_CALLING_CONVENTION __stdcall +#else +#include +#define KS_THREAD_CALLING_CONVENTION +#endif -KS_DECLARE(ks_status_t) ks_thread_create_detached(ks_thread_function_t func, void *data); -ks_status_t ks_thread_create_detached_ex(ks_thread_function_t func, void *data, size_t stack_size); -void ks_thread_override_default_stacksize(size_t size); -KS_DECLARE(ks_status_t) ks_mutex_create(ks_mutex_t **mutex); -KS_DECLARE(ks_status_t) ks_mutex_destroy(ks_mutex_t **mutex); -KS_DECLARE(ks_status_t) ks_mutex_lock(ks_mutex_t *mutex); -KS_DECLARE(ks_status_t) ks_mutex_trylock(ks_mutex_t *mutex); -KS_DECLARE(ks_status_t) ks_mutex_unlock(ks_mutex_t *mutex); +#define KS_THREAD_DEFAULT_STACK 240 * 1024 -#ifdef __cplusplus -} -#endif /* defined(__cplusplus) */ + typedef struct ks_thread ks_thread_t; + typedef void *(*ks_thread_function_t) (ks_thread_t *, void *); -#endif /* defined(_KS_THREADMUTEX_H) */ + typedef +#ifdef WIN32 + void * +#else + pthread_t +#endif + ks_thread_os_handle_t; + +struct ks_thread { + ks_pool_t *pool; +#ifdef WIN32 + void *handle; +#else + pthread_t handle; + pthread_attr_t attribute; +#endif + void *private_data; + ks_thread_function_t function; + size_t stack_size; + uint32_t flags; + uint8_t running; + uint8_t priority; + void *return_data; + }; + + typedef enum { + KS_PRI_LOW = 1, + KS_PRI_NORMAL = 10, + KS_PRI_IMPORTANT = 50, + KS_PRI_REALTIME = 99, + } ks_thread_priority_t; + + typedef enum { + KS_THREAD_FLAG_DEFAULT = 0, + KS_THREAD_FLAG_DETATCHED = (1 << 0) + } ks_thread_flags_t; + + KS_DECLARE(int) ks_thread_set_priority(int nice_val); + KS_DECLARE(ks_thread_os_handle_t) ks_thread_self(void); + KS_DECLARE(ks_thread_os_handle_t) ks_thread_os_handle(ks_thread_t *thread); + KS_DECLARE(ks_status_t) ks_thread_create_ex(ks_thread_t **thread, ks_thread_function_t func, void *data, + uint32_t flags, size_t stack_size, ks_thread_priority_t priority, ks_pool_t *pool); + KS_DECLARE(ks_status_t) ks_thread_join(ks_thread_t *thread); + KS_DECLARE(uint8_t) ks_thread_priority(ks_thread_t *thread); + +#define ks_thread_create(thread, func, data, pool) \ + ks_thread_create_ex(thread, func, data, KS_THREAD_FLAG_DEFAULT, KS_THREAD_DEFAULT_STACK, KS_PRI_NORMAL, pool) + + typedef enum { + KS_MUTEX_FLAG_DEFAULT = 0, + KS_MUTEX_FLAG_NON_RECURSIVE = (1 << 0) + } ks_mutex_flags_t; + + typedef struct ks_mutex ks_mutex_t; + + KS_DECLARE(ks_status_t) ks_mutex_create(ks_mutex_t **mutex, unsigned int flags, ks_pool_t *pool); + KS_DECLARE(ks_status_t) ks_mutex_lock(ks_mutex_t *mutex); + KS_DECLARE(ks_status_t) ks_mutex_trylock(ks_mutex_t *mutex); + KS_DECLARE(ks_status_t) ks_mutex_unlock(ks_mutex_t *mutex); + KS_DECLARE(ks_status_t) ks_mutex_destroy(ks_mutex_t **mutex); + + typedef struct ks_cond ks_cond_t; + + KS_DECLARE(ks_status_t) ks_cond_create(ks_cond_t **cond, ks_pool_t *pool); + KS_DECLARE(ks_status_t) ks_cond_create_ex(ks_cond_t **cond, ks_pool_t *pool, ks_mutex_t *mutex); + KS_DECLARE(ks_status_t) ks_cond_lock(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_trylock(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_unlock(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_signal(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_broadcast(ks_cond_t 
*cond); + KS_DECLARE(ks_status_t) ks_cond_try_signal(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_try_broadcast(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_wait(ks_cond_t *cond); + KS_DECLARE(ks_status_t) ks_cond_timedwait(ks_cond_t *cond, ks_time_t ms); + KS_DECLARE(ks_status_t) ks_cond_destroy(ks_cond_t **cond); + KS_DECLARE(ks_mutex_t *) ks_cond_get_mutex(ks_cond_t *cond); + + typedef struct ks_rwl ks_rwl_t; + + KS_DECLARE(ks_status_t) ks_rwl_create(ks_rwl_t **rwlock, ks_pool_t *pool); + KS_DECLARE(ks_status_t) ks_rwl_read_lock(ks_rwl_t *rwlock); + KS_DECLARE(ks_status_t) ks_rwl_write_lock(ks_rwl_t *rwlock); + KS_DECLARE(ks_status_t) ks_rwl_try_read_lock(ks_rwl_t *rwlock); + KS_DECLARE(ks_status_t) ks_rwl_try_write_lock(ks_rwl_t *rwlock); + KS_DECLARE(ks_status_t) ks_rwl_read_unlock(ks_rwl_t *rwlock); + KS_DECLARE(ks_status_t) ks_rwl_write_unlock(ks_rwl_t *rwlock); + KS_DECLARE(ks_status_t) ks_rwl_destroy(ks_rwl_t **rwlock); + + +KS_END_EXTERN_C + +#endif /* defined(_KS_THREADMUTEX_H) */ /* For Emacs: * Local Variables: diff --git a/libs/libks/src/include/ks_time.h b/libs/libks/src/include/ks_time.h new file mode 100644 index 0000000000..af2c60c892 --- /dev/null +++ b/libs/libks/src/include/ks_time.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2007-2015, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
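The thread and mutex primitives above follow the usual create/join pattern. A minimal sketch (editor-added), assuming an already-opened ks_pool_t supplies the allocations.

#include "ks.h"

static int counter = 0;
static ks_mutex_t *lock = NULL;

/* Editor sketch: matches ks_thread_function_t. */
static void *worker(ks_thread_t *thread, void *data)
{
	ks_mutex_lock(lock);
	counter += *(int *)data;
	ks_mutex_unlock(lock);
	return NULL;
}

static void thread_example(ks_pool_t *pool)
{
	ks_thread_t *thread = NULL;
	int step = 5;

	ks_mutex_create(&lock, KS_MUTEX_FLAG_DEFAULT, pool);
	ks_thread_create(&thread, worker, &step, pool);   /* default flags, stack size and priority */
	ks_thread_join(thread);                           /* safe because the thread was not detached */
	ks_mutex_destroy(&lock);
}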
+ */ + +#ifndef _KS_TIME_H_ +#define _KS_TIME_H_ + +#include "ks.h" + +KS_BEGIN_EXTERN_C + +#define KS_USEC_PER_SEC 1000000 +#define ks_time_sec(time) ((time) / KS_USEC_PER_SEC) +#define ks_time_usec(time) ((time) % KS_USEC_PER_SEC) +#define ks_time_nsec(time) (((time) % KS_USEC_PER_SEC) * 1000) +#define ks_sleep_ms(_t) ks_sleep(_t * 1000) + +KS_DECLARE(ks_time_t) ks_time_now(void); +KS_DECLARE(ks_time_t) ks_time_now_sec(void); +KS_DECLARE(void) ks_sleep(ks_time_t microsec); + +KS_END_EXTERN_C + +#endif /* defined(_KS_TIME_H_) */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/ks_types.h b/libs/libks/src/include/ks_types.h index ec25e35265..ed5796b215 100644 --- a/libs/libks/src/include/ks_types.h +++ b/libs/libks/src/include/ks_types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2014, Anthony Minessale II + * Copyright (c) 2007-2015, Anthony Minessale II * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,113 +34,119 @@ #ifndef _KS_TYPES_H_ #define _KS_TYPES_H_ -#include +#include "ks.h" -#ifdef __cplusplus -extern "C" { -#endif /* defined(__cplusplus) */ +KS_BEGIN_EXTERN_C -#include +#define KS_STR2ENUM_P(_FUNC1, _FUNC2, _TYPE) KS_DECLARE(_TYPE) _FUNC1 (const char *name); KS_DECLARE(const char *) _FUNC2 (_TYPE type); + +#define KS_STR2ENUM(_FUNC1, _FUNC2, _TYPE, _STRINGS, _MAX) \ + KS_DECLARE(_TYPE) _FUNC1 (const char *name) \ + { \ + int i; \ + _TYPE t = _MAX ; \ + \ + for (i = 0; i < _MAX ; i++) { \ + if (!strcasecmp(name, _STRINGS[i])) { \ + t = (_TYPE) i; \ + break; \ + } \ + } \ + \ + return t; \ + } \ + KS_DECLARE(const char *) _FUNC2 (_TYPE type) \ + { \ + if (type > _MAX) { \ + type = _MAX; \ + } \ + return _STRINGS[(int)type]; \ + } \ + +#define KS_ENUM_NAMES(_NAME, _STRINGS) static const char * _NAME [] = { _STRINGS , NULL }; #define KS_VA_NONE "%s", "" + typedef enum { + KS_POLL_READ = (1 << 0), + KS_POLL_WRITE = (1 << 1), + KS_POLL_ERROR = (1 << 2) + } ks_poll_t; -typedef enum { - KS_POLL_READ = (1 << 0), - KS_POLL_WRITE = (1 << 1), - KS_POLL_ERROR = (1 << 2) -} ks_poll_t; + typedef uint16_t ks_port_t; + typedef size_t ks_size_t; -#ifdef WIN32 -#define KS_SEQ_FWHITE FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY -#define KS_SEQ_BWHITE FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE -#define KS_SEQ_FRED FOREGROUND_RED | FOREGROUND_INTENSITY -#define KS_SEQ_BRED FOREGROUND_RED -#define KS_SEQ_FMAGEN FOREGROUND_BLUE | FOREGROUND_RED | FOREGROUND_INTENSITY -#define KS_SEQ_BMAGEN FOREGROUND_BLUE | FOREGROUND_RED -#define KS_SEQ_FCYAN FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY -#define KS_SEQ_BCYAN FOREGROUND_GREEN | FOREGROUND_BLUE -#define KS_SEQ_FGREEN FOREGROUND_GREEN | FOREGROUND_INTENSITY -#define KS_SEQ_BGREEN FOREGROUND_GREEN -#define KS_SEQ_FYELLOW FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_INTENSITY -#define KS_SEQ_BYELLOW FOREGROUND_RED | FOREGROUND_GREEN -#define KS_SEQ_DEFAULT_COLOR KS_SEQ_FWHITE -#define KS_SEQ_FBLUE FOREGROUND_BLUE | FOREGROUND_INTENSITY -#define KS_SEQ_BBLUE FOREGROUND_BLUE -#define KS_SEQ_FBLACK 0 | FOREGROUND_INTENSITY -#define KS_SEQ_BBLACK 0 -#else -#define KS_SEQ_ESC "\033[" -/* Ansi Control character suffixes */ -#define KS_SEQ_HOME_CHAR 'H' -#define KS_SEQ_HOME_CHAR_STR "H" -#define KS_SEQ_CLEARLINE_CHAR '1' -#define KS_SEQ_CLEARLINE_CHAR_STR "1" -#define 
KS_SEQ_CLEARLINEEND_CHAR "K" -#define KS_SEQ_CLEARSCR_CHAR0 '2' -#define KS_SEQ_CLEARSCR_CHAR1 'J' -#define KS_SEQ_CLEARSCR_CHAR "2J" -#define KS_SEQ_DEFAULT_COLOR KS_SEQ_ESC KS_SEQ_END_COLOR /* Reset to Default fg/bg color */ -#define KS_SEQ_AND_COLOR ";" /* To add multiple color definitions */ -#define KS_SEQ_END_COLOR "m" /* To end color definitions */ -/* Foreground colors values */ -#define KS_SEQ_F_BLACK "30" -#define KS_SEQ_F_RED "31" -#define KS_SEQ_F_GREEN "32" -#define KS_SEQ_F_YELLOW "33" -#define KS_SEQ_F_BLUE "34" -#define KS_SEQ_F_MAGEN "35" -#define KS_SEQ_F_CYAN "36" -#define KS_SEQ_F_WHITE "37" -/* Background colors values */ -#define KS_SEQ_B_BLACK "40" -#define KS_SEQ_B_RED "41" -#define KS_SEQ_B_GREEN "42" -#define KS_SEQ_B_YELLOW "43" -#define KS_SEQ_B_BLUE "44" -#define KS_SEQ_B_MAGEN "45" -#define KS_SEQ_B_CYAN "46" -#define KS_SEQ_B_WHITE "47" -/* Preset escape sequences - Change foreground colors only */ -#define KS_SEQ_FBLACK KS_SEQ_ESC KS_SEQ_F_BLACK KS_SEQ_END_COLOR -#define KS_SEQ_FRED KS_SEQ_ESC KS_SEQ_F_RED KS_SEQ_END_COLOR -#define KS_SEQ_FGREEN KS_SEQ_ESC KS_SEQ_F_GREEN KS_SEQ_END_COLOR -#define KS_SEQ_FYELLOW KS_SEQ_ESC KS_SEQ_F_YELLOW KS_SEQ_END_COLOR -#define KS_SEQ_FBLUE KS_SEQ_ESC KS_SEQ_F_BLUE KS_SEQ_END_COLOR -#define KS_SEQ_FMAGEN KS_SEQ_ESC KS_SEQ_F_MAGEN KS_SEQ_END_COLOR -#define KS_SEQ_FCYAN KS_SEQ_ESC KS_SEQ_F_CYAN KS_SEQ_END_COLOR -#define KS_SEQ_FWHITE KS_SEQ_ESC KS_SEQ_F_WHITE KS_SEQ_END_COLOR -#define KS_SEQ_BBLACK KS_SEQ_ESC KS_SEQ_B_BLACK KS_SEQ_END_COLOR -#define KS_SEQ_BRED KS_SEQ_ESC KS_SEQ_B_RED KS_SEQ_END_COLOR -#define KS_SEQ_BGREEN KS_SEQ_ESC KS_SEQ_B_GREEN KS_SEQ_END_COLOR -#define KS_SEQ_BYELLOW KS_SEQ_ESC KS_SEQ_B_YELLOW KS_SEQ_END_COLOR -#define KS_SEQ_BBLUE KS_SEQ_ESC KS_SEQ_B_BLUE KS_SEQ_END_COLOR -#define KS_SEQ_BMAGEN KS_SEQ_ESC KS_SEQ_B_MAGEN KS_SEQ_END_COLOR -#define KS_SEQ_BCYAN KS_SEQ_ESC KS_SEQ_B_CYAN KS_SEQ_END_COLOR -#define KS_SEQ_BWHITE KS_SEQ_ESC KS_SEQ_B_WHITE KS_SEQ_END_COLOR -/* Preset escape sequences */ -#define KS_SEQ_HOME KS_SEQ_ESC KS_SEQ_HOME_CHAR_STR -#define KS_SEQ_CLEARLINE KS_SEQ_ESC KS_SEQ_CLEARLINE_CHAR_STR -#define KS_SEQ_CLEARLINEEND KS_SEQ_ESC KS_SEQ_CLEARLINEEND_CHAR -#define KS_SEQ_CLEARSCR KS_SEQ_ESC KS_SEQ_CLEARSCR_CHAR KS_SEQ_HOME -#endif + typedef enum { + KS_STATUS_SUCCESS, + KS_STATUS_FAIL, + KS_STATUS_BREAK, + KS_STATUS_DISCONNECTED, + KS_STATUS_GENERR, + KS_STATUS_INACTIVE, + KS_STATUS_TIMEOUT, + /* Memory pool errors */ + KS_STATUS_ARG_NULL, /* function argument is null */ + KS_STATUS_ARG_INVALID, /* function argument is invalid */ + KS_STATUS_PNT, /* invalid ks_pool pointer */ + KS_STATUS_POOL_OVER, /* ks_pool structure was overwritten */ + KS_STATUS_PAGE_SIZE, /* could not get system page-size */ + KS_STATUS_OPEN_ZERO, /* could not open /dev/zero */ + KS_STATUS_NO_MEM, /* no memory available */ + KS_STATUS_MMAP, /* problems with mmap */ + KS_STATUS_SIZE, /* error processing requested size */ + KS_STATUS_TOO_BIG, /* allocation exceeded max size */ + KS_STATUS_MEM, /* invalid memory address */ + KS_STATUS_MEM_OVER, /* memory lower bounds overwritten */ + KS_STATUS_NOT_FOUND, /* memory block not found in pool */ + KS_STATUS_IS_FREE, /* memory block already free */ + KS_STATUS_BLOCK_STAT, /* invalid internal block status */ + KS_STATUS_FREE_ADDR, /* invalid internal free address */ + KS_STATUS_NO_PAGES, /* ran out of pages in pool */ + KS_STATUS_ALLOC, /* calloc,malloc,free,realloc failed */ + KS_STATUS_PNT_OVER, /* pointer structure was overwritten */ + KS_STATUS_INVALID_POINTER, /* 
address is not valid */ + /* Always insert new entries above this line*/ + KS_STATUS_COUNT + } ks_status_t; -typedef int16_t ks_port_t; -typedef size_t ks_size_t; +#define STATUS_STRINGS\ + "SUCCESS",\ + "FAIL",\ + "BREAK",\ + "DISCONNECTED",\ + "GENERR",\ + "INACTIVE",\ + "TIMEOUT",\ + "ARG_NULL",\ + "ARG_INVALID",\ + "PNT",\ + "POOL_OVER",\ + "PAGE_SIZE",\ + "OPEN_ZERO",\ + "NO_MEM",\ + "MMAP",\ + "SIZE",\ + "TOO_BIG",\ + "MEM",\ + "MEM_OVER",\ + "NOT_FOUN",\ + "IS_FREE",\ + "BLOCK_STAT",\ + "FREE_ADDR",\ + "NO_PAGES",\ + "ALLOC",\ + "PNT_OVER",\ + "INVALID_POINTER",\ + /* insert new entries before this */\ + "COUNT" -typedef enum { - KS_SUCCESS, - KS_FAIL, - KS_BREAK, - KS_DISCONNECTED, - KS_GENERR -} ks_status_t; + KS_STR2ENUM_P(ks_str2ks_status, ks_status2str, ks_status_t) /*! \brief Used internally for truth test */ -typedef enum { - KS_TRUE = 1, - KS_FALSE = 0 -} ks_bool_t; + typedef enum { + KS_TRUE = 1, + KS_FALSE = 0 + } ks_bool_t; #ifndef __FUNCTION__ #define __FUNCTION__ (const char *)__func__ @@ -164,16 +170,49 @@ typedef enum { #define KS_LOG_CRIT KS_PRE, KS_LOG_LEVEL_CRIT #define KS_LOG_ALERT KS_PRE, KS_LOG_LEVEL_ALERT #define KS_LOG_EMERG KS_PRE, KS_LOG_LEVEL_EMERG -typedef void (*ks_logger_t)(const char *file, const char *func, int line, int level, const char *fmt, ...); -typedef void (*ks_listen_callback_t)(ks_socket_t server_sock, ks_socket_t client_sock, struct sockaddr_in *addr); +struct ks_pool_s; -#ifdef __cplusplus -} -#endif /* defined(__cplusplus) */ +typedef struct ks_pool_s ks_pool_t; +typedef void (*ks_hash_destructor_t)(void *ptr); +typedef enum { + KS_MPCL_ANNOUNCE, + KS_MPCL_TEARDOWN, + KS_MPCL_DESTROY +} ks_pool_cleanup_action_t; -#endif /* defined(_KS_TYPES_H_) */ +typedef enum { + KS_MPCL_FREE, + KS_MPCL_GLOBAL_FREE, +} ks_pool_cleanup_type_t; + +typedef union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; +} ks_sockaddr_in_t; + +typedef struct { + int family; + ks_sockaddr_in_t v; + ks_port_t port; + char host[48]; +} ks_sockaddr_t; + +typedef void (*ks_pool_cleanup_fn_t) (ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype); + +typedef void (*ks_logger_t) (const char *file, const char *func, int line, int level, const char *fmt, ...); +typedef void (*ks_listen_callback_t) (ks_socket_t server_sock, ks_socket_t client_sock, ks_sockaddr_t *addr, void *user_data); + +typedef int64_t ks_time_t; + +struct ks_q_s; +typedef struct ks_q_s ks_q_t; +typedef void (*ks_flush_fn_t)(ks_q_t *q, void *ptr, void *flush_data); + +KS_END_EXTERN_C + +#endif /* defined(_KS_TYPES_H_) */ /* For Emacs: * Local Variables: @@ -185,4 +224,3 @@ typedef void (*ks_listen_callback_t)(ks_socket_t server_sock, ks_socket_t client * For VIM: * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: */ - diff --git a/libs/libks/src/include/ks_utp.h b/libs/libks/src/include/ks_utp.h new file mode 100644 index 0000000000..a39a73d770 --- /dev/null +++ b/libs/libks/src/include/ks_utp.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2007-2015, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
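The KS_STR2ENUM_P declaration above pairs with the STATUS_STRINGS table. The sketch below (editor-added) shows the caller side; the trailing comment only sketches how the library side is presumably generated with KS_ENUM_NAMES/KS_STR2ENUM in one of the .c files.

#include <stdio.h>
#include "ks.h"

/* Editor sketch: turning a ks_status_t into its STATUS_STRINGS name. */
static void report(ks_status_t status)
{
	/* e.g. KS_STATUS_TIMEOUT maps to the "TIMEOUT" entry. */
	fprintf(stderr, "operation returned %s\n", ks_status2str(status));
}

/* Roughly, in a .c file:
 *   KS_ENUM_NAMES(STATUS_NAMES, STATUS_STRINGS)
 *   KS_STR2ENUM(ks_str2ks_status, ks_status2str, ks_status_t, STATUS_NAMES, KS_STATUS_COUNT)
 */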
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _KS_UTP_H_
+#define _KS_UTP_H_
+
+#include "ks.h"
+
+KS_BEGIN_EXTERN_C
+
+typedef struct utp_socket utp_socket_t;
+typedef struct utp_context utp_context_t;
+
+enum {
+	UTP_UDP_DONTFRAG = 2,	// Used to be a #define as UDP_IP_DONTFRAG
+};
+
+enum {
+	/* socket has received syn-ack (notification only for outgoing connection completion); this implies writability */
+	UTP_STATE_CONNECT = 1,
+
+	/* socket is able to send more data */
+	UTP_STATE_WRITABLE = 2,
+
+	/* connection closed */
+	UTP_STATE_EOF = 3,
+
+	/* socket is being destroyed, meaning all data has been sent if possible.
it is not valid to refer to the socket after this state change occurs */ + UTP_STATE_DESTROYING = 4, +}; + +/* Errors codes that can be passed to UTP_ON_ERROR callback */ +enum { + UTP_ECONNREFUSED = 0, + UTP_ECONNRESET, + UTP_ETIMEDOUT, +}; + +enum { + /* callback names */ + UTP_ON_FIREWALL = 0, + UTP_ON_ACCEPT, + UTP_ON_CONNECT, + UTP_ON_ERROR, + UTP_ON_READ, + UTP_ON_OVERHEAD_STATISTICS, + UTP_ON_STATE_CHANGE, + UTP_GET_READ_BUFFER_SIZE, + UTP_ON_DELAY_SAMPLE, + UTP_GET_UDP_MTU, + UTP_GET_UDP_OVERHEAD, + UTP_GET_MILLISECONDS, + UTP_GET_MICROSECONDS, + UTP_GET_RANDOM, + UTP_LOG, + UTP_SENDTO, + + /* context and socket options that may be set/queried */ + UTP_LOG_NORMAL, + UTP_LOG_MTU, + UTP_LOG_DEBUG, + UTP_SNDBUF, + UTP_RCVBUF, + UTP_TARGET_DELAY, + + /* must be last */ + UTP_ARRAY_SIZE, +}; + +typedef struct { + utp_context_t *context; + utp_socket_t *socket; + size_t len; + uint32_t flags; + int callback_type; + const uint8_t *buf; + + union { + const struct sockaddr *address; + int send; + int sample_ms; + int error_code; + int state; + } d1; + + union { + socklen_t address_len; + int type; + } d2; +} utp_callback_arguments; + +typedef uint64_t utp_callback_t (utp_callback_arguments *); + +/* Returned by utp_get_context_stats() */ +typedef struct { + uint32_t _nraw_recv[5]; // total packets recieved less than 300/600/1200/MTU bytes fpr all connections (context-wide) + uint32_t _nraw_send[5]; // total packets sent less than 300/600/1200/MTU bytes for all connections (context-wide) +} utp_context_stats; + +// Returned by utp_get_stats() +typedef struct { + uint64_t nbytes_recv; // total bytes received + uint64_t nbytes_xmit; // total bytes transmitted + uint32_t rexmit; // retransmit counter + uint32_t fastrexmit; // fast retransmit counter + uint32_t nxmit; // transmit counter + uint32_t nrecv; // receive counter (total) + uint32_t nduprecv; // duplicate receive counter + uint32_t mtu_guess; // Best guess at MTU +} utp_socket_stats; + +#define UTP_IOV_MAX 1024 + +/* For utp_writev, to writes data from multiple buffers */ +struct utp_iovec { + void *iov_base; + size_t iov_len; +}; + +// Public Functions +utp_context_t* utp_init (int version); +void utp_destroy (utp_context_t *ctx); +void utp_set_callback (utp_context_t *ctx, int callback_name, utp_callback_t *proc); +void* utp_context_set_userdata (utp_context_t *ctx, void *userdata); +void* utp_context_get_userdata (utp_context_t *ctx); +int utp_context_set_option (utp_context_t *ctx, int opt, int val); +int utp_context_get_option (utp_context_t *ctx, int opt); +int utp_process_udp (utp_context_t *ctx, const uint8_t *buf, size_t len, const struct sockaddr *to, socklen_t tolen); +int utp_process_icmp_error (utp_context_t *ctx, const uint8_t *buffer, size_t len, const struct sockaddr *to, socklen_t tolen); +int utp_process_icmp_fragmentation (utp_context_t *ctx, const uint8_t *buffer, size_t len, const struct sockaddr *to, socklen_t tolen, uint16_t next_hop_mtu); +void utp_check_timeouts (utp_context_t *ctx); +void utp_issue_deferred_acks (utp_context_t *ctx); +utp_context_stats* utp_get_context_stats (utp_context_t *ctx); +utp_socket_t* utp_create_socket (utp_context_t *ctx); +void* utp_set_userdata (utp_socket_t *s, void *userdata); +void* utp_get_userdata (utp_socket_t *s); +int utp_setsockopt (utp_socket_t *s, int opt, int val); +int utp_getsockopt (utp_socket_t *s, int opt); +int utp_connect (utp_socket_t *s, const struct sockaddr *to, socklen_t tolen); +ssize_t utp_write (utp_socket_t *s, void *buf, size_t count); +ssize_t 
utp_writev (utp_socket_t *s, struct utp_iovec *iovec, size_t num_iovecs); +int utp_getpeername (utp_socket_t *s, struct sockaddr *addr, socklen_t *addrlen); +void utp_read_drained (utp_socket_t *s); +int utp_get_delays (utp_socket_t *s, uint32_t *ours, uint32_t *theirs, uint32_t *age); +utp_socket_stats* utp_get_stats (utp_socket_t *s); +utp_context_t* utp_get_context (utp_socket_t *s); +void utp_close (utp_socket_t *s); + +KS_END_EXTERN_C + +#endif /* defined(_KS_UTP_H_) */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/kws.h b/libs/libks/src/include/kws.h new file mode 100644 index 0000000000..2ffe523c92 --- /dev/null +++ b/libs/libks/src/include/kws.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
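/*
 * A minimal sketch of how a caller is expected to drive the context API
 * above: create a context, register callbacks, feed every received UDP
 * datagram through utp_process_udp(), and tick utp_check_timeouts()
 * periodically.  The version value passed to utp_init(), and the assumption
 * that utp_process_udp() returns non-zero when it consumed the datagram,
 * are not spelled out by this header.
 */
#include "ks_utp.h"

static uint64_t on_read(utp_callback_arguments *a)
{
	/* consume a->buf / a->len here, then tell uTP the buffer is drained */
	utp_read_drained(a->socket);
	return 0;
}

static uint64_t on_state_change(utp_callback_arguments *a)
{
	if (a->d1.state == UTP_STATE_EOF || a->d1.state == UTP_STATE_DESTROYING) {
		/* the socket must not be used after UTP_STATE_DESTROYING */
	}
	return 0;
}

static utp_context_t *make_context(void)
{
	utp_context_t *ctx = utp_init(2);	/* assumed protocol version */

	utp_set_callback(ctx, UTP_ON_READ, on_read);
	utp_set_callback(ctx, UTP_ON_STATE_CHANGE, on_state_change);
	return ctx;
}

static void pump_datagram(utp_context_t *ctx, const uint8_t *buf, size_t len,
						  const struct sockaddr *from, socklen_t fromlen)
{
	if (!utp_process_udp(ctx, buf, len, from, fromlen)) {
		/* not uTP traffic: hand it to whatever else shares the socket */
	}
	utp_check_timeouts(ctx);
}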
+ */ + +#ifndef _KWS_H +#define _KWS_H + +#define WEBSOCKET_GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" +#define B64BUFFLEN 1024 +#include "ks.h" + +KS_BEGIN_EXTERN_C + +typedef enum { + WS_NONE = 0, + WS_NORMAL = 1000, + WS_PROTO_ERR = 1002, + WS_DATA_TOO_BIG = 1009 +} kws_cause_t; + +typedef enum { + WSOC_CONTINUATION = 0x0, + WSOC_TEXT = 0x1, + WSOC_BINARY = 0x2, + WSOC_CLOSE = 0x8, + WSOC_PING = 0x9, + WSOC_PONG = 0xA +} kws_opcode_t; + +typedef enum { + KWS_CLIENT, + KWS_SERVER +} kws_type_t; + +typedef enum { + KWS_CLOSE_SOCK, + KWS_BLOCK, + KWS_STAY_OPEN +} kws_flag_t; + +struct kws_s; +typedef struct kws_s kws_t; + + +KS_DECLARE(ks_ssize_t) kws_read_frame(kws_t *kws, kws_opcode_t *oc, uint8_t **data); +KS_DECLARE(ks_ssize_t) kws_write_frame(kws_t *kws, kws_opcode_t oc, void *data, ks_size_t bytes); +KS_DECLARE(ks_ssize_t) kws_raw_read(kws_t *kws, void *data, ks_size_t bytes, int block); +KS_DECLARE(ks_ssize_t) kws_raw_write(kws_t *kws, void *data, ks_size_t bytes); +KS_DECLARE(ks_status_t) kws_init(kws_t **kwsP, ks_socket_t sock, SSL_CTX *ssl_ctx, const char *client_data, kws_flag_t flags, ks_pool_t *pool); +KS_DECLARE(ks_ssize_t) kws_close(kws_t *kws, int16_t reason); +KS_DECLARE(void) kws_destroy(kws_t **kwsP); +KS_DECLARE(ks_status_t) kws_get_buffer(kws_t *kws, char **bufP, ks_size_t *buflen); + + + +#if 0 +static inline uint64_t get_unaligned_uint64(const void *p) +{ + const struct { uint64_t d; } __attribute__((packed)) *pp = p; + return pp->d; +} +#endif + +KS_END_EXTERN_C + +#endif /* defined(_KWS_H_) */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/include/simclist.h b/libs/libks/src/include/simclist.h index 1b99ad9c43..d22587df33 100755 --- a/libs/libks/src/include/simclist.h +++ b/libs/libks/src/include/simclist.h @@ -32,20 +32,20 @@ extern "C" { #include #ifndef SIMCLIST_NO_DUMPRESTORE -# ifndef _WIN32 -# include /* list_dump_info_t's struct timeval */ -# else -# include -# endif +#ifndef _WIN32 +#include /* list_dump_info_t's struct timeval */ +#else +#include +#endif #endif /* Be friend of both C90 and C99 compilers */ #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - /* "inline" and "restrict" are keywords */ + /* "inline" and "restrict" are keywords */ #else -# define inline /* inline */ -# define restrict /* restrict */ +#define inline /* inline */ +#define restrict /* restrict */ #endif @@ -58,13 +58,13 @@ extern "C" { #ifndef SIMCLIST_NO_DUMPRESTORE typedef struct { - uint16_t version; /* dump version */ - struct timeval timestamp; /* when the list has been dumped, seconds since UNIX epoch */ + uint16_t version; /* dump version */ + struct timeval timestamp; /* when the list has been dumped, seconds since UNIX epoch */ uint32_t list_size; uint32_t list_numels; - list_hash_t list_hash; /* hash of the list when dumped, or 0 if invalid */ + list_hash_t list_hash; /* hash of the list when dumped, or 0 if invalid */ uint32_t dumpsize; - int consistent; /* 1 if the dump is verified complete/consistent; 0 otherwise */ + int consistent; /* 1 if the dump is verified complete/consistent; 0 otherwise */ } list_dump_info_t; #endif @@ -77,7 +77,7 @@ extern "C" { * * It is responsability of the function to handle possible NULL values. 
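/*
 * A minimal usage sketch for the kws API above, assuming an already-accepted
 * TCP socket and an open ks_pool_t.  Passing a NULL SSL_CTX (plain text) and
 * NULL client_data (server-side handshake) are assumptions about the
 * implementation, not guarantees made by the header.
 */
#include "ks.h"
#include "kws.h"

static ks_status_t ws_echo_once(ks_socket_t client_sock, ks_pool_t *pool)
{
	kws_t *kws = NULL;
	kws_opcode_t oc;
	uint8_t *data = NULL;
	ks_ssize_t bytes;

	if (kws_init(&kws, client_sock, NULL, NULL, KWS_BLOCK, pool) != KS_STATUS_SUCCESS) {
		return KS_STATUS_FAIL;
	}

	/* read one frame and echo text frames straight back */
	if ((bytes = kws_read_frame(kws, &oc, &data)) > 0 && oc == WSOC_TEXT) {
		kws_write_frame(kws, WSOC_TEXT, data, (ks_size_t)bytes);
	}

	kws_close(kws, WS_NORMAL);
	kws_destroy(&kws);
	return KS_STATUS_SUCCESS;
}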
*/ - typedef int (*element_comparator)(const void *a, const void *b); + typedef int (*element_comparator) (const void *a, const void *b); /** * a seeker of elements. @@ -90,7 +90,7 @@ extern "C" { * It is responsability of the function to handle possible NULL values in any * argument. */ - typedef int (*element_seeker)(const void *el, const void *indicator); + typedef int (*element_seeker) (const void *el, const void *indicator); /** * an element lenght meter. @@ -101,7 +101,7 @@ extern "C" { * * It is responsability of the function to handle possible NULL values. */ - typedef size_t (*element_meter)(const void *el); + typedef size_t (*element_meter) (const void *el); /** * a function computing the hash of elements. @@ -112,7 +112,7 @@ extern "C" { * * It is responsability of the function to handle possible NULL values. */ - typedef list_hash_t (*element_hash_computer)(const void *el); + typedef list_hash_t (*element_hash_computer) (const void *el); /** * a function for serializing an element. @@ -132,7 +132,7 @@ extern "C" { * @param serialize_buffer reference to fill with the length of the buffer * @return reference to the buffer with the serialized data */ - typedef void *(*element_serializer)(const void *restrict el, uint32_t *restrict serializ_len); + typedef void *(*element_serializer) (const void *restrict el, uint32_t *restrict serializ_len); /** * a function for un-serializing an element. @@ -149,7 +149,7 @@ extern "C" { * @param data_len reference to the location where to store the length of the data in the buffer returned * @return reference to a buffer with the original, unserialized representation of the element */ - typedef void *(*element_unserializer)(const void *restrict data, uint32_t *restrict data_len); + typedef void *(*element_unserializer) (const void *restrict data, uint32_t *restrict data_len); /* [private-use] list entry -- olds actual user datum */ struct list_entry_s { @@ -776,49 +776,49 @@ extern "C" { * ready-made comparator for int8_t elements. * @see list_attributes_comparator() */ - int list_comparator_int8_t(const void *a, const void *b); + int list_comparator_int8_t (const void *a, const void *b); /** * ready-made comparator for int16_t elements. * @see list_attributes_comparator() */ - int list_comparator_int16_t(const void *a, const void *b); + int list_comparator_int16_t (const void *a, const void *b); /** * ready-made comparator for int32_t elements. * @see list_attributes_comparator() */ - int list_comparator_int32_t(const void *a, const void *b); + int list_comparator_int32_t (const void *a, const void *b); /** * ready-made comparator for int64_t elements. * @see list_attributes_comparator() */ - int list_comparator_int64_t(const void *a, const void *b); + int list_comparator_int64_t (const void *a, const void *b); /** * ready-made comparator for uint8_t elements. * @see list_attributes_comparator() */ - int list_comparator_uint8_t(const void *a, const void *b); + int list_comparator_uint8_t (const void *a, const void *b); /** * ready-made comparator for uint16_t elements. * @see list_attributes_comparator() */ - int list_comparator_uint16_t(const void *a, const void *b); + int list_comparator_uint16_t (const void *a, const void *b); /** * ready-made comparator for uint32_t elements. * @see list_attributes_comparator() */ - int list_comparator_uint32_t(const void *a, const void *b); + int list_comparator_uint32_t (const void *a, const void *b); /** * ready-made comparator for uint64_t elements. 
* @see list_attributes_comparator() */ - int list_comparator_uint64_t(const void *a, const void *b); + int list_comparator_uint64_t (const void *a, const void *b); /** * ready-made comparator for float elements. @@ -843,49 +843,49 @@ extern "C" { * ready-made metric function for int8_t elements. * @see list_attributes_copy() */ - size_t list_meter_int8_t(const void *el); + size_t list_meter_int8_t (const void *el); /** * ready-made metric function for int16_t elements. * @see list_attributes_copy() */ - size_t list_meter_int16_t(const void *el); + size_t list_meter_int16_t (const void *el); /** * ready-made metric function for int32_t elements. * @see list_attributes_copy() */ - size_t list_meter_int32_t(const void *el); + size_t list_meter_int32_t (const void *el); /** * ready-made metric function for int64_t elements. * @see list_attributes_copy() */ - size_t list_meter_int64_t(const void *el); + size_t list_meter_int64_t (const void *el); /** * ready-made metric function for uint8_t elements. * @see list_attributes_copy() */ - size_t list_meter_uint8_t(const void *el); + size_t list_meter_uint8_t (const void *el); /** * ready-made metric function for uint16_t elements. * @see list_attributes_copy() */ - size_t list_meter_uint16_t(const void *el); + size_t list_meter_uint16_t (const void *el); /** * ready-made metric function for uint32_t elements. * @see list_attributes_copy() */ - size_t list_meter_uint32_t(const void *el); + size_t list_meter_uint32_t (const void *el); /** * ready-made metric function for uint64_t elements. * @see list_attributes_copy() */ - size_t list_meter_uint64_t(const void *el); + size_t list_meter_uint64_t (const void *el); /** * ready-made metric function for float elements. @@ -975,9 +975,7 @@ extern "C" { #ifdef __cplusplus } #endif - #endif - /* For Emacs: * Local Variables: * mode:c diff --git a/libs/libks/src/ks.c b/libs/libks/src/ks.c index 2747bd43e9..656714e89b 100644 --- a/libs/libks/src/ks.c +++ b/libs/libks/src/ks.c @@ -31,346 +31,78 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - -/* Use select on windows and poll everywhere else. - Select is the devil. Especially if you are doing a lot of small socket connections. - If your FD number is bigger than 1024 you will silently create memory corruption. - - If you have build errors on your platform because you don't have poll find a way to detect it and #define KS_USE_SELECT and #undef KS_USE_POLL - All of this will be upgraded to autoheadache eventually. -*/ - -/* TBD for win32 figure out how to tell if you have WSAPoll (vista or higher) and use it when available by #defining KS_USE_WSAPOLL (see below) */ - -#ifdef _MSC_VER -#define FD_SETSIZE 8192 -#define KS_USE_SELECT -#else -#define KS_USE_POLL -#endif - #include -#ifndef WIN32 -#define closesocket(x) shutdown(x, 2); close(x) -#include -#include -#else -#pragma warning (disable:6386) -/* These warnings need to be ignored warning in sdk header */ -#include -#include -#pragma comment(lib, "Ws2_32.lib") -#ifndef errno -#define errno WSAGetLastError() -#endif -#ifndef EINTR -#define EINTR WSAEINTR -#endif -#pragma warning (default:6386) -#endif -#ifdef KS_USE_POLL -#include -#endif - -#ifndef KS_MIN -#define KS_MIN(x,y) ((x) < (y) ? (x) : (y)) -#endif -#ifndef KS_MAX -#define KS_MAX(x,y) ((x) > (y) ? 
(x) : (y)) -#endif -#ifndef KS_CLAMP -#define KS_CLAMP(min,max,val) (KS_MIN(max,KS_MAX(val,min))) -#endif +static ks_pool_t *pool = NULL; -/* Written by Marc Espie, public domain */ -#define KS_CTYPE_NUM_CHARS 256 -const short _ks_C_toupper_[1 + KS_CTYPE_NUM_CHARS] = { - EOF, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, - 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, - 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, - 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, - 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, - 0x60, 'A', 'B', 'C', 'D', 'E', 'F', 'G', - 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', - 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', - 'X', 'Y', 'Z', 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, - 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, - 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, - 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, - 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, - 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, - 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, - 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, - 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, - 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, - 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, - 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, - 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, - 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff -}; - -const short *_ks_toupper_tab_ = _ks_C_toupper_; - -KS_DECLARE(int) ks_toupper(int c) +KS_DECLARE(void) ks_random_string(char *buf, uint16_t len, char *set) { - if ((unsigned int)c > 255) - return(c); - if (c < -1) - return EOF; - return((_ks_toupper_tab_ + 1)[c]); -} + char chars[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + int max; + uint16_t x; -const short _ks_C_tolower_[1 + KS_CTYPE_NUM_CHARS] = { - EOF, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, - 0x40, 'a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', - 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', - 'x', 'y', 'z', 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, - 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, - 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, - 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, - 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, - 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, - 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, - 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, - 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, - 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, - 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, - 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 
0xcd, 0xce, 0xcf, - 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, - 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, - 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, - 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, - 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, - 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff -}; - -const short *_ks_tolower_tab_ = _ks_C_tolower_; - -KS_DECLARE(int) ks_tolower(int c) -{ - if ((unsigned int)c > 255) - return(c); - if (c < -1) - return EOF; - return((_ks_tolower_tab_ + 1)[c]); -} - -KS_DECLARE(const char *)ks_stristr(const char *instr, const char *str) -{ -/* -** Rev History: 16/07/97 Greg Thayer Optimized -** 07/04/95 Bob Stout ANSI-fy -** 02/03/94 Fred Cole Original -** 09/01/03 Bob Stout Bug fix (lines 40-41) per Fred Bulback -** -** Hereby donated to public domain. -*/ - const char *pptr, *sptr, *start; - - if (!str || !instr) - return NULL; - - for (start = str; *start; start++) { - /* find start of pattern in string */ - for (; ((*start) && (ks_toupper(*start) != ks_toupper(*instr))); start++); - - if (!*start) - return NULL; - - pptr = instr; - sptr = start; - - while (ks_toupper(*sptr) == ks_toupper(*pptr)) { - sptr++; - pptr++; - - /* if end of pattern then pattern was found */ - if (!*pptr) - return (start); - - if (!*sptr) - return NULL; - } - } - return NULL; -} - -#ifdef WIN32 -#ifndef vsnprintf -#define vsnprintf _vsnprintf -#endif -#endif - - -int vasprintf(char **ret, const char *format, va_list ap); - -KS_DECLARE(int) ks_vasprintf(char **ret, const char *fmt, va_list ap) -{ -#if !defined(WIN32) && !defined(__sun) - return vasprintf(ret, fmt, ap); -#else - char *buf; - int len; - size_t buflen; - va_list ap2; - char *tmp = NULL; - -#ifdef _MSC_VER -#if _MSC_VER >= 1500 - /* hack for incorrect assumption in msvc header files for code analysis */ - __analysis_assume(tmp); -#endif - ap2 = ap; -#else - va_copy(ap2, ap); -#endif - - len = vsnprintf(tmp, 0, fmt, ap2); - - if (len > 0 && (buf = malloc((buflen = (size_t) (len + 1)))) != NULL) { - len = vsnprintf(buf, buflen, fmt, ap); - *ret = buf; - } else { - *ret = NULL; - len = -1; + if (!set) { + set = chars; } - va_end(ap2); - return len; -#endif -} + max = (int) strlen(set); - - - -KS_DECLARE(int) ks_snprintf(char *buffer, size_t count, const char *fmt, ...) -{ - va_list ap; - int ret; - - va_start(ap, fmt); - ret = vsnprintf(buffer, count-1, fmt, ap); - if (ret < 0) - buffer[count-1] = '\0'; - va_end(ap); - return ret; -} - -static void null_logger(const char *file, const char *func, int line, int level, const char *fmt, ...) -{ - if (file && func && line && level && fmt) { - return; + for (x = 0; x < len; x++) { + int j = (int) (max * 1.0 * rand() / (RAND_MAX + 1.0)); + buf[x] = set[j]; } - return; } -static const char *LEVEL_NAMES[] = { - "EMERG", - "ALERT", - "CRIT", - "ERROR", - "WARNING", - "NOTICE", - "INFO", - "DEBUG", - NULL -}; - -static int ks_log_level = 7; - -static const char *cut_path(const char *in) +KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_fn_t fn, void *arg) { - const char *p, *ret = in; - char delims[] = "/\\"; - char *i; - - for (i = delims; *i; i++) { - p = in; - while ((p = strchr(p, *i)) != 0) { - ret = ++p; - } - } - return ret; + return ks_pool_set_cleanup(ks_global_pool(), NULL, arg, 0, fn); } - - -static void default_logger(const char *file, const char *func, int line, int level, const char *fmt, ...) 
-{ - const char *fp; - char *data; - va_list ap; - int ret; - if (level < 0 || level > 7) { - level = 7; - } - if (level > ks_log_level) { - return; +KS_DECLARE(ks_status_t) ks_init(void) +{ + + srand(getpid() * (intptr_t)&pool + time(NULL)); + ks_ssl_init_ssl_locks(); + ks_global_pool(); + ks_rng_init(); + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_shutdown(void) +{ + ks_status_t status = KS_STATUS_SUCCESS; + + ks_ssl_destroy_ssl_locks(); + //ks_rng_shutdown(); + + if (pool) { + status = ks_pool_close(&pool); } - fp = cut_path(file); - - va_start(ap, fmt); - - ret = ks_vasprintf(&data, fmt, ap); - - if (ret != -1) { - fprintf(stderr, "[%s] %s:%d %s() %s", LEVEL_NAMES[level], fp, line, func, data); - free(data); - } - - va_end(ap); - + return status; } -ks_logger_t ks_log = null_logger; - -KS_DECLARE(void) ks_global_set_logger(ks_logger_t logger) +KS_DECLARE(ks_pool_t *) ks_global_pool(void) { - if (logger) { - ks_log = logger; - } else { - ks_log = null_logger; - } -} -KS_DECLARE(void) ks_global_set_default_logger(int level) -{ - if (level < 0 || level > 7) { - level = 7; + ks_status_t status; + + if (!pool) { + if ((status = ks_pool_open(&pool)) != KS_STATUS_SUCCESS) { + abort(); + } } - ks_log = default_logger; - ks_log_level = level; + return pool; } +KS_ENUM_NAMES(STATUS_NAMES, STATUS_STRINGS) +KS_STR2ENUM(ks_str2ks_status, ks_status2str, ks_status_t, STATUS_NAMES, KS_STATUS_COUNT) + KS_DECLARE(size_t) ks_url_encode(const char *url, char *buf, size_t len) { const char *p; @@ -408,7 +140,7 @@ KS_DECLARE(size_t) ks_url_encode(const char *url, char *buf, size_t len) return x; } -KS_DECLARE(char *)ks_url_decode(char *s) +KS_DECLARE(char *) ks_url_decode(char *s) { char *o; unsigned int tmp; @@ -425,316 +157,30 @@ KS_DECLARE(char *)ks_url_decode(char *s) return s; } - -static int ks_socket_reuseaddr(ks_socket_t socket) +KS_DECLARE(int) ks_cpu_count(void) { -#ifdef WIN32 - BOOL reuse_addr = TRUE; - return setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse_addr, sizeof(reuse_addr)); -#else - int reuse_addr = 1; - return setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, &reuse_addr, sizeof(reuse_addr)); -#endif -} - - -struct thread_handler { - ks_listen_callback_t callback; - ks_socket_t server_sock; - ks_socket_t client_sock; - struct sockaddr_in addr; -}; - -static void *client_thread(ks_thread_t *me, void *obj) -{ - struct thread_handler *handler = (struct thread_handler *) obj; - - handler->callback(handler->server_sock, handler->client_sock, &handler->addr); - free(handler); - - return NULL; - -} - -KS_DECLARE(ks_status_t) ks_listen(const char *host, ks_port_t port, ks_listen_callback_t callback) -{ - ks_socket_t server_sock = KS_SOCK_INVALID; - struct sockaddr_in addr; - ks_status_t status = KS_SUCCESS; - - if ((server_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { - return KS_FAIL; - } - - ks_socket_reuseaddr(server_sock); - - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = htons(port); - - if (bind(server_sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { - status = KS_FAIL; - goto end; - } - - if (listen(server_sock, 10000) < 0) { - status = KS_FAIL; - goto end; - } - - for (;;) { - int client_sock; - struct sockaddr_in echoClntAddr; -#ifdef WIN32 - int clntLen; -#else - unsigned int clntLen; -#endif - - clntLen = sizeof(echoClntAddr); - - if ((client_sock = accept(server_sock, (struct sockaddr *) &echoClntAddr, &clntLen)) == KS_SOCK_INVALID) { - status = KS_FAIL; - goto 
end; - } - - callback(server_sock, client_sock, &echoClntAddr); - } - - end: - - if (server_sock != KS_SOCK_INVALID) { - closesocket(server_sock); - server_sock = KS_SOCK_INVALID; - } - - return status; - -} - -KS_DECLARE(ks_status_t) ks_listen_threaded(const char *host, ks_port_t port, ks_listen_callback_t callback, int max) -{ - ks_socket_t server_sock = KS_SOCK_INVALID; - struct sockaddr_in addr; - ks_status_t status = KS_SUCCESS; - struct thread_handler *handler = NULL; - - if ((server_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { - return KS_FAIL; - } - - ks_socket_reuseaddr(server_sock); - - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = htons(port); - - if (bind(server_sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { - status = KS_FAIL; - goto end; - } - - if (listen(server_sock, max) < 0) { - status = KS_FAIL; - goto end; - } - - for (;;) { - int client_sock; - struct sockaddr_in echoClntAddr; -#ifdef WIN32 - int clntLen; -#else - unsigned int clntLen; -#endif - - clntLen = sizeof(echoClntAddr); - - if ((client_sock = accept(server_sock, (struct sockaddr *) &echoClntAddr, &clntLen)) == KS_SOCK_INVALID) { - status = KS_FAIL; - goto end; - } - - handler = malloc(sizeof(*handler)); - ks_assert(handler); - - memset(handler, 0, sizeof(*handler)); - handler->callback = callback; - handler->server_sock = server_sock; - handler->client_sock = client_sock; - handler->addr = echoClntAddr; - - ks_thread_create_detached(client_thread, handler); - } - - end: - - if (server_sock != KS_SOCK_INVALID) { - closesocket(server_sock); - server_sock = KS_SOCK_INVALID; - } - - return status; - -} - - -/* USE WSAPoll on vista or higher */ -#ifdef KS_USE_WSAPOLL -KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags) -{ -} -#endif - - -#ifdef KS_USE_SELECT -#ifdef WIN32 -#pragma warning( push ) -#pragma warning( disable : 6262 ) /* warning C6262: Function uses '98348' bytes of stack: exceeds /analyze:stacksize'16384'. Consider moving some data to heap */ -#endif -KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags) -{ - int s = 0, r = 0; - fd_set rfds; - fd_set wfds; - fd_set efds; - struct timeval tv; - - FD_ZERO(&rfds); - FD_ZERO(&wfds); - FD_ZERO(&efds); + int cpu_count; #ifndef WIN32 - /* Wouldn't you rather know?? */ - assert(sock <= FD_SETSIZE); -#endif - - if ((flags & KS_POLL_READ)) { - -#ifdef WIN32 -#pragma warning( push ) -#pragma warning( disable : 4127 ) - FD_SET(sock, &rfds); -#pragma warning( pop ) + cpu_count = sysconf (_SC_NPROCESSORS_ONLN); #else - FD_SET(sock, &rfds); -#endif + { + SYSTEM_INFO sysinfo; + GetSystemInfo( &sysinfo ); + cpu_count = sysinfo.dwNumberOfProcessors; } - - if ((flags & KS_POLL_WRITE)) { - -#ifdef WIN32 -#pragma warning( push ) -#pragma warning( disable : 4127 ) - FD_SET(sock, &wfds); -#pragma warning( pop ) -#else - FD_SET(sock, &wfds); #endif - } - - if ((flags & KS_POLL_ERROR)) { - -#ifdef WIN32 -#pragma warning( push ) -#pragma warning( disable : 4127 ) - FD_SET(sock, &efds); -#pragma warning( pop ) -#else - FD_SET(sock, &efds); -#endif - } - - tv.tv_sec = ms / 1000; - tv.tv_usec = (ms % 1000) * ms; - s = select(sock + 1, (flags & KS_POLL_READ) ? &rfds : NULL, (flags & KS_POLL_WRITE) ? &wfds : NULL, (flags & KS_POLL_ERROR) ? 
&efds : NULL, &tv); - - if (s < 0) { - r = s; - } else if (s > 0) { - if ((flags & KS_POLL_READ) && FD_ISSET(sock, &rfds)) { - r |= KS_POLL_READ; - } - - if ((flags & KS_POLL_WRITE) && FD_ISSET(sock, &wfds)) { - r |= KS_POLL_WRITE; - } - - if ((flags & KS_POLL_ERROR) && FD_ISSET(sock, &efds)) { - r |= KS_POLL_ERROR; - } - } - - return r; - -} -#ifdef WIN32 -#pragma warning( pop ) -#endif -#endif - -#ifdef KS_USE_POLL -KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags) -{ - struct pollfd pfds[2] = { { 0 } }; - int s = 0, r = 0; - - pfds[0].fd = sock; - - if ((flags & KS_POLL_READ)) { - pfds[0].events |= POLLIN; - } - - if ((flags & KS_POLL_WRITE)) { - pfds[0].events |= POLLOUT; - } - - if ((flags & KS_POLL_ERROR)) { - pfds[0].events |= POLLERR; - } - - s = poll(pfds, 1, ms); - - if (s < 0) { - r = s; - } else if (s > 0) { - if ((pfds[0].revents & POLLIN)) { - r |= KS_POLL_READ; - } - if ((pfds[0].revents & POLLOUT)) { - r |= KS_POLL_WRITE; - } - if ((pfds[0].revents & POLLERR)) { - r |= KS_POLL_ERROR; - } - } - - return r; - -} -#endif - - -KS_DECLARE(unsigned int) ks_separate_string_string(char *buf, const char *delim, char **array, unsigned int arraylen) -{ - unsigned int count = 0; - char *d; - size_t dlen = strlen(delim); - - array[count++] = buf; - - while (count < arraylen && array[count - 1]) { - if ((d = strstr(array[count - 1], delim))) { - *d = '\0'; - d += dlen; - array[count++] = d; - } else - break; - } - - return count; + return cpu_count; } +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_bencode.c b/libs/libks/src/ks_bencode.c new file mode 100644 index 0000000000..e6f4abb3e1 --- /dev/null +++ b/libs/libks/src/ks_bencode.c @@ -0,0 +1,2697 @@ +/* + * libbencodetools + * + * Written by Heikki Orsila and + * Janne Kulmala in 2011. + */ +/* +Copyright (C) 2002-2005 Bram Cohen and Ross Cohen + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of Codeville nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
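/*
 * A minimal bring-up/teardown sketch using the ks.c functions defined above,
 * assuming "ks.h" exposes their prototypes: ks_init() seeds rand(), sets up
 * the SSL locks and the global pool; ks_shutdown() closes that pool again.
 */
#include <stdio.h>
#include "ks.h"

int main(void)
{
	char token[17] = { 0 };

	if (ks_init() != KS_STATUS_SUCCESS) {
		return 1;
	}

	/* a NULL character set falls back to the built-in alphanumeric set */
	ks_random_string(token, (uint16_t)(sizeof(token) - 1), NULL);
	printf("cpus=%d token=%s\n", ks_cpu_count(), token);

	return ks_shutdown() == KS_STATUS_SUCCESS ? 0 : 1;
}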
+ + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define die(fmt, ...) /*fprintf(stderr, "bencode: fatal error: " fmt, __VA_ARGS__ ); abort()*/ abort() +#define warn(fmt, ...) /*fprintf(stderr, "bencode: warning: " fmt, __VA_ARGS__ )*/ + +#define MAX_ALLOC (((size_t) -1) / sizeof(struct bencode *) / 2) +#define DICT_MAX_ALLOC (((size_t) -1) / sizeof(struct bencode_dict_node) / 2) + +struct ben_decode_ctx { + const char *data; + const size_t len; + size_t off; + int error; + int level; + char c; + int line; + struct bencode_type **types; +}; + +struct ben_encode_ctx { + char *data; + size_t size; + size_t pos; +}; + +/* + * Buffer size for fitting all unsigned long long and long long integers, + * assuming it is at most 64 bits. If long long is larger than 64 bits, + * an error is produced when too large an integer is converted. + */ +#define LONGLONGSIZE 21 + +static struct bencode *decode_printed(struct ben_decode_ctx *ctx); +static void inplace_ben_str(struct bencode_str *b, const char *s, size_t len); +static int resize_dict(struct bencode_dict *d, size_t newalloc); +static int resize_list(struct bencode_list *list, size_t newalloc); +static int unpack(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl); +static struct bencode *pack(struct ben_decode_ctx *ctx, va_list *vl); + +static size_t type_size(int type) +{ + switch (type) { + case BENCODE_BOOL: + return sizeof(struct bencode_bool); + case BENCODE_DICT: + return sizeof(struct bencode_dict); + case BENCODE_INT: + return sizeof(struct bencode_int); + case BENCODE_LIST: + return sizeof(struct bencode_list); + case BENCODE_STR: + return sizeof(struct bencode_str); + default: + die("Unknown type: %d\n", type); + } +} + +static void *alloc(int type) +{ + struct bencode *b = calloc(1, type_size(type)); + if (b == NULL) + return NULL; + b->type = type; + return b; +} + +void *ben_alloc_user(struct bencode_type *type) +{ + struct bencode_user *user = calloc(1, type->size); + if (user == NULL) + return NULL; + user->type = BENCODE_USER; + user->info = type; + return user; +} + +static int insufficient(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INSUFFICIENT; + return -1; +} + +static int invalid(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INVALID; + return -1; +} + +static int mismatch(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_MISMATCH; + return -1; +} + +void *ben_insufficient_ptr(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INSUFFICIENT; + return NULL; +} + +void *ben_invalid_ptr(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_INVALID; + return NULL; +} + +void *ben_oom_ptr(struct ben_decode_ctx *ctx) +{ + ctx->error = BEN_NO_MEMORY; + return NULL; +} + +int ben_need_bytes(const struct ben_decode_ctx *ctx, size_t n) +{ + return ((ctx->off + n) <= ctx->len) ? 0 : -1; +} + +char ben_current_char(const struct ben_decode_ctx *ctx) +{ + return ctx->data[ctx->off]; +} + +const char *ben_current_buf(const struct ben_decode_ctx *ctx, size_t n) +{ + return ben_need_bytes(ctx, n) ? 
NULL : ctx->data + ctx->off; +} + +void ben_skip(struct ben_decode_ctx *ctx, size_t n) +{ + ctx->off += n; +} + +static struct bencode *internal_blob(void *data, size_t len) +{ + struct bencode_str *b = alloc(BENCODE_STR); + if (b == NULL) + return NULL; + b->s = data; + b->len = len; + assert(b->s[len] == 0); + return (struct bencode *) b; +} + +static void skip_to_next_line(struct ben_decode_ctx *ctx) +{ + for (; ctx->off < ctx->len; ctx->off++) { + if (ben_current_char(ctx) == '\n') { + ctx->line++; + ctx->off++; + break; + } + } +} + +static int seek_char(struct ben_decode_ctx *ctx) +{ + while (ctx->off < ctx->len) { + char c = ben_current_char(ctx); + if (isspace(c)) { + if (c == '\n') + ctx->line++; + ctx->off++; + } else if (c == '#') { + /* Skip comment */ + ctx->off++; + skip_to_next_line(ctx); + } else { + return 0; + } + } + return insufficient(ctx); +} + +/* + * Test if string 's' is located at current position. + * Increment current position and return 0 if the string matches. + * Returns -1 otherwise. The function avoids buffer overflow. + */ +static int try_match(struct ben_decode_ctx *ctx, const char *s) +{ + size_t n = strlen(s); + if (ben_need_bytes(ctx, n)) + return -1; + if (memcmp(ctx->data + ctx->off, s, n) != 0) + return -1; + ctx->off += n; + return 0; +} + +static int try_match_with_errors(struct ben_decode_ctx *ctx, const char *s) +{ + size_t n = strlen(s); + size_t left = ctx->len - ctx->off; + + assert(ctx->off <= ctx->len); + + if (left == 0) + return insufficient(ctx); + + if (left < n) { + if (memcmp(ctx->data + ctx->off, s, left) != 0) + return invalid(ctx); + return insufficient(ctx); + } + + if (memcmp(ctx->data + ctx->off, s, n) != 0) + return invalid(ctx); + + ctx->off += n; + return 0; +} + +int ben_allocate(struct bencode *b, size_t n) +{ + switch (b->type) { + case BENCODE_DICT: + return resize_dict(ben_dict_cast(b), n); + case BENCODE_LIST: + return resize_list(ben_list_cast(b), n); + default: + die("ben_allocate(): Unknown type %d\n", b->type); + } +} + +static struct bencode *clone_dict(const struct bencode_dict *d) +{ + struct bencode *key; + struct bencode *value; + struct bencode *newkey; + struct bencode *newvalue; + size_t pos; + struct bencode *newdict = ben_dict(); + if (newdict == NULL) + return NULL; + ben_dict_for_each(key, value, pos, (const struct bencode *) d) { + newkey = ben_clone(key); + newvalue = ben_clone(value); + if (newkey == NULL || newvalue == NULL) { + ben_free(newkey); + ben_free(newvalue); + goto error; + } + if (ben_dict_set(newdict, newkey, newvalue)) { + ben_free(newkey); + ben_free(newvalue); + goto error; + } + newkey = NULL; + newvalue = NULL; + } + return newdict; + +error: + ben_free(newdict); + return NULL; +} + +static struct bencode *clone_list(const struct bencode_list *list) +{ + struct bencode *value; + struct bencode *newvalue; + size_t pos; + struct bencode *newlist = ben_list(); + if (newlist == NULL) + return NULL; + ben_list_for_each(value, pos, (const struct bencode *) list) { + newvalue = ben_clone(value); + if (newvalue == NULL) + goto error; + if (ben_list_append(newlist, newvalue)) { + ben_free(newvalue); + goto error; + } + newvalue = NULL; + } + return newlist; + +error: + ben_free(newlist); + return NULL; +} + +static struct bencode *clone_str(const struct bencode_str *s) +{ + return ben_blob(s->s, s->len); +} + +static struct bencode *share_dict(const struct bencode_dict *d) +{ + struct bencode *newdict = ben_dict(); + if (newdict == NULL) + return NULL; + memcpy(newdict, d, sizeof(*d)); + 
((struct bencode_dict *) newdict)->shared = 1; + return newdict; +} + +static struct bencode *share_list(const struct bencode_list *list) +{ + struct bencode *newlist = ben_list(); + if (newlist == NULL) + return NULL; + memcpy(newlist, list, sizeof(*list)); + ((struct bencode_list *) newlist)->shared = 1; + return newlist; +} + +struct bencode *ben_clone(const struct bencode *b) +{ + switch (b->type) { + case BENCODE_BOOL: + return ben_bool(ben_bool_const_cast(b)->b); + case BENCODE_DICT: + return clone_dict(ben_dict_const_cast(b)); + case BENCODE_INT: + return ben_int(ben_int_const_cast(b)->ll); + case BENCODE_LIST: + return clone_list(ben_list_const_cast(b)); + case BENCODE_STR: + return clone_str(ben_str_const_cast(b)); + default: + die("Invalid type %c\n", b->type); + } +} + +struct bencode *ben_shared_clone(const struct bencode *b) +{ + switch (b->type) { + case BENCODE_DICT: + return share_dict(ben_dict_const_cast(b)); + break; + case BENCODE_LIST: + return share_list(ben_list_const_cast(b)); + break; + default: + return ben_clone(b); + } +} + +static int cmp_dict(const struct bencode *a, const struct bencode *b) +{ + size_t len = ben_dict_len(a); + size_t pos; + struct bencode *key; + struct bencode *va; + struct bencode *vb; + int ret = 0; + struct bencode_keyvalue *pairs; + + if (len != ben_dict_len(b)) { + /* Returning any non-zero value is allowed */ + return (len < ben_dict_len(b)) ? -1 : 1; + } + + pairs = ben_dict_ordered_items(a); + for (pos = 0; pos < len; pos++) { + key = pairs[pos].key; + va = pairs[pos].value; + vb = ben_dict_get(b, key); + if (vb == NULL) { + /* Returning any non-zero value is allowed */ + ret = (a < b) ? -1 : 1; + break; + } + ret = ben_cmp(va, vb); + if (ret) + break; + } + + free(pairs); + return ret; +} + +static int cmp_list(const struct bencode *a, const struct bencode *b) +{ + const struct bencode_list *la; + const struct bencode_list *lb; + struct bencode *va; + struct bencode *vb; + size_t cmplen; + size_t i; + int ret; + + la = ben_list_const_cast(a); + lb = ben_list_const_cast(b); + cmplen = (la->n <= lb->n) ? la->n : lb->n; + + for (i = 0; i < cmplen; ++i) { + va = ben_list_get(a, i); + vb = ben_list_get(b, i); + ret = ben_cmp(va, vb); + if (ret) + return ret; + } + if (la->n != lb->n) + return (la->n < lb->n) ? -1 : 1; + return 0; +} + +int ben_cmp(const struct bencode *a, const struct bencode *b) +{ + size_t cmplen; + int ret; + const struct bencode_int *ia; + const struct bencode_int *ib; + const struct bencode_str *sa; + const struct bencode_str *sb; + const struct bencode_user *ua; + const struct bencode_user *ub; + + if (a->type != b->type) + return (a->type == BENCODE_INT) ? -1 : 1; + + switch (a->type) { + case BENCODE_INT: + ia = ben_int_const_cast(a); + ib = ben_int_const_cast(b); + if (ia->ll < ib->ll) + return -1; + if (ib->ll < ia->ll) + return 1; + return 0; + case BENCODE_STR: + sa = ben_str_const_cast(a); + sb = ben_str_const_cast(b); + cmplen = (sa->len <= sb->len) ? sa->len : sb->len; + ret = memcmp(sa->s, sb->s, cmplen); + if (ret) + return ret < 0 ? -1 : 1; + if (sa->len != sb->len) + return (sa->len < sb->len) ? -1 : 1; + return 0; + case BENCODE_DICT: + return cmp_dict(a, b); + case BENCODE_LIST: + return cmp_list(a, b); + case BENCODE_USER: + ua = ben_user_const_cast(a); + ub = ben_user_const_cast(b); + if (ua->info != ub->info) + return (a < b) ? 
-1 : 1; + return ua->info->cmp(a, b); + default: + die("Invalid type %c\n", b->type); + } +} + +int ben_cmp_with_str(const struct bencode *a, const char *s) +{ + struct bencode_str b; + inplace_ben_str(&b, s, strlen(s)); + return ben_cmp(a, (struct bencode *) &b); +} + +int ben_cmp_qsort(const void *a, const void *b) +{ + const struct bencode *akey = ((const struct bencode_keyvalue *) a)->key; + const struct bencode *bkey = ((const struct bencode_keyvalue *) b)->key; + return ben_cmp(akey, bkey); +} + +static struct bencode *decode_bool(struct ben_decode_ctx *ctx) +{ + struct bencode_bool *b; + char value; + char c; + if (ben_need_bytes(ctx, 2)) + return ben_insufficient_ptr(ctx); + ctx->off++; + + c = ben_current_char(ctx); + if (c != '0' && c != '1') + return ben_invalid_ptr(ctx); + + value = (c == '1'); + b = alloc(BENCODE_BOOL); + if (b == NULL) + return ben_oom_ptr(ctx); + + b->b = value; + ctx->off++; + return (struct bencode *) b; +} + +static size_t hash_bucket(long long hash, const struct bencode_dict *d) +{ + return hash & (d->alloc - 1); +} + +static size_t hash_bucket_head(long long hash, const struct bencode_dict *d) +{ + if (d->buckets == NULL) + return -1; + return d->buckets[hash_bucket(hash, d)]; +} + +static int resize_dict(struct bencode_dict *d, size_t newalloc) +{ + size_t *newbuckets; + struct bencode_dict_node *newnodes;; + size_t pos; + + if (newalloc == -1) { + if (d->alloc >= DICT_MAX_ALLOC) + return -1; + + if (d->alloc == 0) + newalloc = 4; + else + newalloc = d->alloc * 2; + } else { + size_t x; + if (newalloc < d->n || newalloc > DICT_MAX_ALLOC) + return -1; + /* Round to next power of two */ + x = 1; + while (x < newalloc) + x <<= 1; + assert(x >= newalloc); + newalloc = x; + if (newalloc > DICT_MAX_ALLOC) + return -1; + } + + /* size must be a power of two */ + assert((newalloc & (newalloc - 1)) == 0); + + newbuckets = realloc(d->buckets, sizeof(newbuckets[0]) * newalloc); + newnodes = realloc(d->nodes, sizeof(newnodes[0]) * newalloc); + if (newnodes == NULL || newbuckets == NULL) { + free(newnodes); + free(newbuckets); + return -1; + } + + d->alloc = newalloc; + d->buckets = newbuckets; + d->nodes = newnodes; + + /* Clear all buckets */ + memset(d->buckets, -1, d->alloc * sizeof(d->buckets[0])); + + /* Reinsert nodes into buckets */ + for (pos = 0; pos < d->n; pos++) { + struct bencode_dict_node *node = &d->nodes[pos]; + size_t bucket = hash_bucket(node->hash, d); + node->next = d->buckets[bucket]; + d->buckets[bucket] = pos; + } + + return 0; +} + +/* The string/binary object hash is copied from Python */ +static long long str_hash(const unsigned char *s, size_t len) +{ + long long hash; + size_t i; + if (len == 0) + return 0; + hash = s[0] << 7; + for (i = 0; i < len; i++) + hash = (1000003 * hash) ^ s[i]; + hash ^= len; + if (hash == -1) + hash = -2; + return hash; +} + +long long ben_str_hash(const struct bencode *b) +{ + const struct bencode_str *bstr = ben_str_const_cast(b); + const unsigned char *s = (unsigned char *) bstr->s; + return str_hash(s, bstr->len); +} + +long long ben_int_hash(const struct bencode *b) +{ + long long x = ben_int_const_cast(b)->ll; + return (x == -1) ? 
-2 : x; +} + +long long ben_hash(const struct bencode *b) +{ + switch (b->type) { + case BENCODE_INT: + return ben_int_hash(b); + case BENCODE_STR: + return ben_str_hash(b); + default: + die("hash: Invalid type: %d\n", b->type); + } +} + +static struct bencode *decode_dict(struct ben_decode_ctx *ctx) +{ + struct bencode *key; + struct bencode *lastkey = NULL; + struct bencode *value; + struct bencode_dict *d; + + d = alloc(BENCODE_DICT); + if (d == NULL) { + //warn("Not enough memory for dict\n"); + return ben_oom_ptr(ctx); + } + + ctx->off++; + + while (ctx->off < ctx->len && ben_current_char(ctx) != 'e') { + key = ben_ctx_decode(ctx); + if (key == NULL) + goto error; + if (key->type != BENCODE_INT && key->type != BENCODE_STR) { + ben_free(key); + key = NULL; + ctx->error = BEN_INVALID; + //warn("Invalid dict key type\n"); + goto error; + } + + if (lastkey != NULL && ben_cmp(lastkey, key) >= 0) { + ben_free(key); + key = NULL; + ctx->error = BEN_INVALID; + goto error; + } + + value = ben_ctx_decode(ctx); + if (value == NULL) { + ben_free(key); + key = NULL; + goto error; + } + + if (ben_dict_set((struct bencode *) d, key, value)) { + ben_free(key); + ben_free(value); + key = NULL; + value = NULL; + ctx->error = BEN_NO_MEMORY; + goto error; + } + + lastkey = key; + } + if (ctx->off >= ctx->len) { + ctx->error = BEN_INSUFFICIENT; + goto error; + } + + ctx->off++; + + return (struct bencode *) d; + +error: + ben_free((struct bencode *) d); + return NULL; +} + +static size_t find(const struct ben_decode_ctx *ctx, char c) +{ + char *match = memchr(ctx->data + ctx->off, c, ctx->len - ctx->off); + if (match == NULL) + return -1; + return (size_t) (match - ctx->data); +} + +/* off is the position of first number in */ +static int read_long_long(long long *ll, struct ben_decode_ctx *ctx, int c) +{ + char buf[LONGLONGSIZE]; /* fits all 64 bit integers */ + char *endptr; + size_t slen; + size_t pos = find(ctx, c); + + if (pos == -1) + return insufficient(ctx); + + slen = pos - ctx->off; + if (slen == 0 || slen >= sizeof buf) + return invalid(ctx); + + assert(slen < sizeof buf); + memcpy(buf, ctx->data + ctx->off, slen); + buf[slen] = 0; + + if (buf[0] != '-' && !isdigit(buf[0])) + return invalid(ctx); + + errno = 0; + *ll = strtoll(buf, &endptr, 10); + if (errno == ERANGE || *endptr != 0) + return invalid(ctx); + + /* + * Demand a unique encoding for all integers. + * Zero may not begin with a (minus) sign. + * Non-zero integers may not have leading zeros in the encoding. 
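+ * For example, "0" is the only accepted encoding of zero; inputs such as
+ * "-0" or "007" are rejected as non-canonical.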
+ */ + if (buf[0] == '-' && buf[1] == '0') + return invalid(ctx); + if (buf[0] == '0' && pos != (ctx->off + 1)) + return invalid(ctx); + + ctx->off = pos + 1; + return 0; +} + +static struct bencode *decode_int(struct ben_decode_ctx *ctx) +{ + struct bencode_int *b; + long long ll; + ctx->off++; + if (read_long_long(&ll, ctx, 'e')) + return NULL; + b = alloc(BENCODE_INT); + if (b == NULL) + return ben_oom_ptr(ctx); + b->ll = ll; + return (struct bencode *) b; +} + +static int resize_list(struct bencode_list *list, size_t newalloc) +{ + struct bencode **newvalues; + size_t newsize; + + if (newalloc == -1) { + if (list->alloc >= MAX_ALLOC) + return -1; + if (list->alloc == 0) + newalloc = 4; + else + newalloc = list->alloc * 2; + } else { + if (newalloc < list->n || newalloc > MAX_ALLOC) + return -1; + } + + newsize = sizeof(list->values[0]) * newalloc; + newvalues = realloc(list->values, newsize); + if (newvalues == NULL) + return -1; + list->alloc = newalloc; + list->values = newvalues; + return 0; +} + +static struct bencode *decode_list(struct ben_decode_ctx *ctx) +{ + struct bencode_list *l = alloc(BENCODE_LIST); + if (l == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (ctx->off < ctx->len && ben_current_char(ctx) != 'e') { + struct bencode *b = ben_ctx_decode(ctx); + if (b == NULL) + goto error; + if (ben_list_append((struct bencode *) l, b)) { + ben_free(b); + ctx->error = BEN_NO_MEMORY; + goto error; + } + } + + if (ctx->off >= ctx->len) { + ctx->error = BEN_INSUFFICIENT; + goto error; + } + + ctx->off++; + return (struct bencode *) l; + +error: + ben_free((struct bencode *) l); + return NULL; +} + +static size_t read_size_t(struct ben_decode_ctx *ctx, int c) +{ + long long ll; + size_t s; + if (read_long_long(&ll, ctx, c)) + return -1; + if (ll < 0) + return invalid(ctx); + /* + * Test that information is not lost when converting from long long + * to size_t + */ + s = (size_t) ll; + if (ll != (long long) s) + return invalid(ctx); + return s; +} + +static struct bencode *decode_str(struct ben_decode_ctx *ctx) +{ + struct bencode *b; + size_t datalen = read_size_t(ctx, ':'); /* Read the string length */ + if (datalen == -1) + return NULL; + + if (ben_need_bytes(ctx, datalen)) + return ben_insufficient_ptr(ctx); + + /* Allocate string structure and copy data into it */ + b = ben_blob(ctx->data + ctx->off, datalen); + ctx->off += datalen; + return b; +} + +struct bencode *ben_ctx_decode(struct ben_decode_ctx *ctx) +{ + char c; + struct bencode_type *type; + struct bencode *b; + ctx->level++; + if (ctx->level > 256) + return ben_invalid_ptr(ctx); + + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + + assert (ctx->off < ctx->len); + c = ben_current_char(ctx); + switch (c) { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + b = decode_str(ctx); + break; + case 'b': + b = decode_bool(ctx); + break; + case 'd': + b = decode_dict(ctx); + break; + case 'i': + b = decode_int(ctx); + break; + case 'l': + b = decode_list(ctx); + break; + default: + if (ctx->types && (unsigned char) c < 128) { + type = ctx->types[(unsigned char) c]; + if (type) { + ctx->off++; + b = type->decode(ctx); + } else + return ben_invalid_ptr(ctx); + } else + return ben_invalid_ptr(ctx); + } + ctx->level--; + return b; +} + +struct bencode *ben_decode(const void *data, size_t len) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len}; + struct bencode *b = ben_ctx_decode(&ctx); + if (b != NULL && ctx.off != len) { + 
ben_free(b); + return NULL; + } + return b; +} + +struct bencode *ben_decode2(const void *data, size_t len, size_t *off, int *error) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len, .off = *off}; + struct bencode *b = ben_ctx_decode(&ctx); + *off = ctx.off; + if (error != NULL) { + assert((b != NULL) ^ (ctx.error != 0)); + *error = ctx.error; + } + return b; +} + +struct bencode *ben_decode3(const void *data, size_t len, size_t *off, int *error, struct bencode_type *types[128]) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len, .off = *off, + .types = types}; + struct bencode *b = ben_ctx_decode(&ctx); + *off = ctx.off; + if (error != NULL) { + assert((b != NULL) ^ (ctx.error != 0)); + *error = ctx.error; + } + return b; +} + +static struct bencode *decode_printed_bool(struct ben_decode_ctx *ctx) +{ + struct bencode *b; + int bval = -1; + + if (try_match(ctx, "True")) { + if (ben_need_bytes(ctx, 4)) + return ben_insufficient_ptr(ctx); + } else { + bval = 1; + } + + if (bval < 0) { + /* It's not 'True', so it can only be 'False'. Verify it. */ + if (try_match_with_errors(ctx, "False")) + return NULL; + bval = 0; + } + + assert(bval == 0 || bval == 1); + b = ben_bool(bval); + if (b == NULL) + return ben_oom_ptr(ctx); + return b; +} + +static struct bencode *decode_printed_dict(struct ben_decode_ctx *ctx) +{ + struct bencode *d = ben_dict(); + struct bencode *key = NULL; + struct bencode *value = NULL; + + if (d == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == '}') { + ctx->off++; + break; + } + + key = decode_printed(ctx); + if (key == NULL) + goto nullpath; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) != ':') + goto invalidpath; + ctx->off++; + + value = decode_printed(ctx); + if (value == NULL) + goto nullpath; + + if (ben_dict_set(d, key, value)) { + ben_free(key); + ben_free(value); + ben_free(d); + return ben_oom_ptr(ctx); + } + key = NULL; + value = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != '}') + goto invalidpath; + } + return d; + +invalidpath: + ben_free(key); + ben_free(value); + ben_free(d); + return ben_invalid_ptr(ctx); + +nullpath: + ben_free(key); + ben_free(value); + ben_free(d); + return NULL; +} + +static struct bencode *decode_printed_int(struct ben_decode_ctx *ctx) +{ + long long ll; + char buf[LONGLONGSIZE]; + char *end; + size_t pos = 0; + struct bencode *b; + int gotzero = 0; + int base = 10; + int neg = 0; + + if (ben_current_char(ctx) == '-') { + neg = 1; + ctx->off++; + } + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + + if (ben_current_char(ctx) == '0') { + buf[pos] = '0'; + pos++; + ctx->off++; + gotzero = 1; + } + + if (gotzero) { + if (ctx->off == ctx->len) { + ll = 0; + goto returnwithval; + } + if (ben_current_char(ctx) == 'x') { + pos = 0; + base = 16; + ctx->off++; + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + } else if (isdigit(ben_current_char(ctx))) { + base = 8; + } + } else { + if (ctx->off == ctx->len) + return ben_insufficient_ptr(ctx); + } + + while (ctx->off < ctx->len && pos < sizeof buf) { + char c = ben_current_char(ctx); + if (base == 16) { + if (!isxdigit(c)) + break; + } else { + if (!isdigit(c)) + break; + } + buf[pos] = c; + pos++; + ctx->off++; + } + if (pos == 0 || pos == sizeof buf) + return ben_invalid_ptr(ctx); + buf[pos] = 0; + ll = strtoll(buf, &end, base); + if (*end != 0) + 
return ben_invalid_ptr(ctx); + +returnwithval: + if (neg) + ll = -ll; + b = ben_int(ll); + if (b == NULL) + return ben_oom_ptr(ctx); + return b; +} + +static struct bencode *decode_printed_list(struct ben_decode_ctx *ctx) +{ + struct bencode *l = ben_list(); + struct bencode *b = NULL; + + if (l == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ']') { + ctx->off++; + break; + } + b = decode_printed(ctx); + if (b == NULL) + goto nullpath; + if (ben_list_append(l, b)) { + ben_free(b); + ben_free(l); + return ben_oom_ptr(ctx); + } + b = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != ']') { + ben_free(l); + return ben_invalid_ptr(ctx); + } + } + return l; + +nullpath: + ben_free(b); + ben_free(l); + return NULL; +} + +static struct bencode *decode_printed_str(struct ben_decode_ctx *ctx) +{ + size_t pos; + char *s = NULL; + size_t len = 0; + char initial = ben_current_char(ctx); + struct bencode *b; + + ctx->off++; + pos = ctx->off; + while (pos < ctx->len) { + char c = ctx->data[pos]; + if (!isprint(c)) + return ben_invalid_ptr(ctx); + if (c == initial) + break; + len++; + pos++; + if (c != '\\') + continue; /* Normal printable char, e.g. 'a' */ + /* Handle '\\' */ + if (pos == ctx->len) + return ben_insufficient_ptr(ctx); + + c = ctx->data[pos]; + pos++; + if (c == 'x') { + /* hexadecimal value: \xHH */ + pos += 2; + } + } + if (pos >= ctx->len) + return ben_insufficient_ptr(ctx); + + s = malloc(len + 1); + if (s == NULL) + return ben_oom_ptr(ctx); + + pos = 0; + while (ctx->off < ctx->len) { + char c = ben_current_char(ctx); + assert(isprint(c)); + if (c == initial) + break; + assert(pos < len); + ctx->off++; + if (c != '\\') { + s[pos] = c; + pos++; + continue; /* Normal printable char, e.g. 'a' */ + } + /* Handle '\\' */ + + /* + * Note, we do assert because we have already verified in the + * previous loop that there is sufficient data. + */ + assert(ctx->off != ctx->len); + c = ben_current_char(ctx); + ctx->off++; + if (c == 'x') { + /* hexadecimal value: \xHH */ + char *end; + unsigned long x; + char buf[3]; + assert((ctx->off + 1) < ctx->len); + buf[0] = ctx->data[ctx->off + 0]; + buf[1] = ctx->data[ctx->off + 1]; + buf[2] = 0; + ctx->off += 2; + x = strtoul(buf, &end, 16); + if (*end != 0) + goto invalid; + assert(x < 256); + c = (char) x; + } + s[pos] = c; + pos++; + } + assert(pos == len); + if (ctx->off >= ctx->len) + return ben_insufficient_ptr(ctx); + ctx->off++; + + s[pos] = 0; /* the area must always be zero terminated! 
*/ + + b = internal_blob(s, len); + if (b == NULL) { + free(s); + return ben_oom_ptr(ctx); + } + return b; + +invalid: + free(s); + return ben_invalid_ptr(ctx); +} + +static struct bencode *decode_printed(struct ben_decode_ctx *ctx) +{ + struct bencode *b; + + ctx->level++; + if (ctx->level > 256) + return ben_invalid_ptr(ctx); + + if (seek_char(ctx)) + return NULL; + + switch (ben_current_char(ctx)) { + case '\'': + case '"': + b = decode_printed_str(ctx); + break; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + b = decode_printed_int(ctx); + break; + case 'F': + case 'T': + b = decode_printed_bool(ctx); + break; + case '[': + b = decode_printed_list(ctx); + break; + case '{': + b = decode_printed_dict(ctx); + break; + default: + return ben_invalid_ptr(ctx); + } + ctx->level--; + return b; +} + +struct bencode *ben_decode_printed(const void *data, size_t len) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len}; + return decode_printed(&ctx); +} + +struct bencode *ben_decode_printed2(const void *data, size_t len, size_t *off, struct bencode_error *error) +{ + struct ben_decode_ctx ctx = {.data = data, .len = len, .off = *off}; + struct bencode *b = decode_printed(&ctx); + *off = ctx.off; + if (error != NULL) { + assert((b != NULL) ^ (ctx.error != 0)); + error->error = ctx.error; + if (b != NULL) { + error->off = 0; + error->line = 0; + } else { + error->off = ctx.off; + error->line = ctx.line; + } + } + return b; +} + +static void free_dict(struct bencode_dict *d) +{ + size_t pos; + if (d->shared) + return; + for (pos = 0; pos < d->n; pos++) { + ben_free(d->nodes[pos].key); + ben_free(d->nodes[pos].value); + d->nodes[pos].key = NULL; + d->nodes[pos].value = NULL; + } + free(d->buckets); + free(d->nodes); +} + +static void free_list(struct bencode_list *list) +{ + size_t pos; + if (list->shared) + return; + for (pos = 0; pos < list->n; pos++) { + ben_free(list->values[pos]); + list->values[pos] = NULL; + } + free(list->values); +} + +int ben_put_char(struct ben_encode_ctx *ctx, char c) +{ + if (ctx->pos >= ctx->size) + return -1; + ctx->data[ctx->pos] = c; + ctx->pos++; + return 0; +} + +int ben_put_buffer(struct ben_encode_ctx *ctx, const void *buf, size_t len) +{ + if ((ctx->pos + len) > ctx->size) + return -1; + memcpy(ctx->data + ctx->pos, buf, len); + ctx->pos += len; + return 0; +} + +static int puthexchar(struct ben_encode_ctx *ctx, unsigned char hex) +{ + char buf[5]; + int len = snprintf(buf, sizeof buf, "\\x%.2x", hex); + assert(len == 4); + return ben_put_buffer(ctx, buf, len); +} + +static int putlonglong(struct ben_encode_ctx *ctx, long long ll) +{ + char buf[LONGLONGSIZE]; + int len = snprintf(buf, sizeof buf, "%lld", ll); + assert(len > 0); + return ben_put_buffer(ctx, buf, len); +} + +static int putunsignedlonglong(struct ben_encode_ctx *ctx, unsigned long long llu) +{ + char buf[LONGLONGSIZE]; + int len = snprintf(buf, sizeof buf, "%llu", llu); + assert(len > 0); + return ben_put_buffer(ctx, buf, len); +} + +static int putstr(struct ben_encode_ctx *ctx, char *s) +{ + return ben_put_buffer(ctx, s, strlen(s)); +} + +static int print(struct ben_encode_ctx *ctx, const struct bencode *b) +{ + const struct bencode_bool *boolean; + const struct bencode_int *integer; + const struct bencode_list *list; + const struct bencode_str *s; + size_t i; + size_t len; + struct bencode_keyvalue *pairs; + + switch (b->type) { + case BENCODE_BOOL: + boolean = ben_bool_const_cast(b); + return putstr(ctx, 
boolean->b ? "True" : "False"); + + case BENCODE_DICT: + if (ben_put_char(ctx, '{')) + return -1; + + pairs = ben_dict_ordered_items(b); + if (pairs == NULL) { + //warn("No memory for dict serialization\n"); + return -1; + } + + len = ben_dict_len(b); + for (i = 0; i < len; i++) { + if (print(ctx, pairs[i].key)) + break; + if (putstr(ctx, ": ")) + break; + if (print(ctx, pairs[i].value)) + break; + if (i < (len - 1)) { + if (putstr(ctx, ", ")) + break; + } + } + free(pairs); + pairs = NULL; + if (i < len) + return -1; + + return ben_put_char(ctx, '}'); + + case BENCODE_INT: + integer = ben_int_const_cast(b); + + if (putlonglong(ctx, integer->ll)) + return -1; + + return 0; + + case BENCODE_LIST: + if (ben_put_char(ctx, '[')) + return -1; + list = ben_list_const_cast(b); + for (i = 0; i < list->n; i++) { + if (print(ctx, list->values[i])) + return -1; + if (i < (list->n - 1) && putstr(ctx, ", ")) + return -1; + } + return ben_put_char(ctx, ']'); + + case BENCODE_STR: + s = ben_str_const_cast(b); + if (ben_put_char(ctx, '\'')) + return -1; + for (i = 0; i < s->len; i++) { + if (!isprint(s->s[i])) { + if (puthexchar(ctx, s->s[i])) + return -1; + continue; + } + + switch (s->s[i]) { + case '\'': + case '\\': + /* Need escape character */ + if (ben_put_char(ctx, '\\')) + return -1; + default: + if (ben_put_char(ctx, s->s[i])) + return -1; + break; + } + } + return ben_put_char(ctx, '\''); + default: + die("serialization type %d not implemented\n", b->type); + } +} + +static size_t get_printed_size(const struct bencode *b) +{ + size_t pos; + const struct bencode_bool *boolean; + const struct bencode_dict *d; + const struct bencode_int *i; + const struct bencode_list *l; + const struct bencode_str *s; + size_t size = 0; + char buf[1]; + + switch (b->type) { + case BENCODE_BOOL: + boolean = ben_bool_const_cast(b); + return boolean->b ? 4 : 5; /* "True" and "False" */ + case BENCODE_DICT: + size++; /* "{" */ + d = ben_dict_const_cast(b); + for (pos = 0; pos < d->n; pos++) { + size += get_printed_size(d->nodes[pos].key); + size += 2; /* ": " */ + size += get_printed_size(d->nodes[pos].value); + if (pos < (d->n - 1)) + size += 2; /* ", " */ + } + size++; /* "}" */ + return size; + case BENCODE_INT: + i = ben_int_const_cast(b); + return snprintf(buf, 0, "%lld", i->ll); + case BENCODE_LIST: + size++; /* "[" */ + l = ben_list_const_cast(b); + for (pos = 0; pos < l->n; pos++) { + size += get_printed_size(l->values[pos]); + if (pos < (l->n - 1)) + size += 2; /* ", " */ + } + size++; /* "]" */ + return size; + case BENCODE_STR: + s = ben_str_const_cast(b); + size++; /* ' */ + for (pos = 0; pos < s->len; pos++) { + if (!isprint(s->s[pos])) { + size += 4; /* "\xDD" */ + continue; + } + switch (s->s[pos]) { + case '\'': + case '\\': + size += 2; /* escaped characters */ + break; + default: + size++; + break; + } + } + size++; /* ' */ + return size; + default: + die("Unknown type: %c\n", b->type); + } +} + +int ben_ctx_encode(struct ben_encode_ctx *ctx, const struct bencode *b) +{ + const struct bencode_bool *boolean; + const struct bencode_int *integer; + const struct bencode_list *list; + const struct bencode_str *s; + const struct bencode_user *u; + size_t i; + size_t len; + struct bencode_keyvalue *pairs; + + switch (b->type) { + case BENCODE_BOOL: + boolean = ben_bool_const_cast(b); + return putstr(ctx, boolean->b ? 
"b1" : "b0"); + + case BENCODE_DICT: + if (ben_put_char(ctx, 'd')) + return -1; + + pairs = ben_dict_ordered_items(b); + if (pairs == NULL) { + //warn("No memory for dict serialization\n"); + return -1; + } + + len = ben_dict_len(b); + for (i = 0; i < len; i++) { + if (ben_ctx_encode(ctx, pairs[i].key)) + break; + if (ben_ctx_encode(ctx, pairs[i].value)) + break; + } + free(pairs); + pairs = NULL; + if (i < len) + return -1; + + return ben_put_char(ctx, 'e'); + + case BENCODE_INT: + if (ben_put_char(ctx, 'i')) + return -1; + integer = ben_int_const_cast(b); + if (putlonglong(ctx, integer->ll)) + return -1; + return ben_put_char(ctx, 'e'); + + case BENCODE_LIST: + if (ben_put_char(ctx, 'l')) + return -1; + + list = ben_list_const_cast(b); + for (i = 0; i < list->n; i++) { + if (ben_ctx_encode(ctx, list->values[i])) + return -1; + } + + return ben_put_char(ctx, 'e'); + + case BENCODE_STR: + s = ben_str_const_cast(b); + if (putunsignedlonglong(ctx, ((long long) s->len))) + return -1; + if (ben_put_char(ctx, ':')) + return -1; + return ben_put_buffer(ctx, s->s, s->len); + + case BENCODE_USER: + u = ben_user_const_cast(b); + return u->info->encode(ctx, b); + + default: + die("serialization type %d not implemented\n", b->type); + } +} + +static size_t get_size(const struct bencode *b) +{ + size_t pos; + const struct bencode_dict *d; + const struct bencode_int *i; + const struct bencode_list *l; + const struct bencode_str *s; + const struct bencode_user *u; + size_t size = 0; + char buf[1]; + + switch (b->type) { + case BENCODE_BOOL: + return 2; + case BENCODE_DICT: + d = ben_dict_const_cast(b); + for (pos = 0; pos < d->n; pos++) { + size += get_size(d->nodes[pos].key); + size += get_size(d->nodes[pos].value); + } + return size + 2; + case BENCODE_INT: + i = ben_int_const_cast(b); + return 2 + snprintf(buf, 0, "%lld", i->ll); + case BENCODE_LIST: + l = ben_list_const_cast(b); + for (pos = 0; pos < l->n; pos++) + size += get_size(l->values[pos]); + return size + 2; + case BENCODE_STR: + s = ben_str_const_cast(b); + return snprintf(buf, 0, "%zu", s->len) + 1 + s->len; + case BENCODE_USER: + u = ben_user_const_cast(b); + return u->info->get_size(b); + default: + die("Unknown type: %c\n", b->type); + } +} + +size_t ben_encoded_size(const struct bencode *b) +{ + return get_size(b); +} + +void *ben_encode(size_t *len, const struct bencode *b) +{ + size_t size = get_size(b); + void *data = malloc(size); + struct ben_encode_ctx ctx = {.data = data, .size = size}; + if (data == NULL) { + //warn("No memory to encode\n"); + return NULL; + } + if (ben_ctx_encode(&ctx, b)) { + free(ctx.data); + return NULL; + } + assert(ctx.pos == size); + *len = ctx.pos; + return data; +} + +size_t ben_encode2(char *data, size_t maxlen, const struct bencode *b) +{ + struct ben_encode_ctx ctx = {.data = data, .size = maxlen, .pos = 0}; + if (ben_ctx_encode(&ctx, b)) + return -1; + return ctx.pos; +} + +void ben_free(struct bencode *b) +{ + struct bencode_str *s; + struct bencode_user *u; + size_t size; + if (b == NULL) + return; + switch (b->type) { + case BENCODE_BOOL: + break; + case BENCODE_DICT: + free_dict(ben_dict_cast(b)); + break; + case BENCODE_INT: + break; + case BENCODE_LIST: + free_list(ben_list_cast(b)); + break; + case BENCODE_STR: + s = ben_str_cast(b); + free(s->s); + break; + case BENCODE_USER: + u = ben_user_cast(b); + if (u->info->free) + u->info->free(b); + break; + default: + die("invalid type: %d\n", b->type); + } + + if (b->type == BENCODE_USER) + size = ((struct bencode_user *) b)->info->size; + else 
+ size = type_size(b->type); + memset(b, -1, size); /* data poison */ + free(b); +} + +struct bencode *ben_blob(const void *data, size_t len) +{ + struct bencode_str *b = alloc(BENCODE_STR); + if (b == NULL) + return NULL; + /* Allocate one extra byte for zero termination for convenient use */ + b->s = malloc(len + 1); + if (b->s == NULL) { + free(b); + return NULL; + } + memcpy(b->s, data, len); + b->len = len; + b->s[len] = 0; + return (struct bencode *) b; +} + +struct bencode *ben_bool(int boolean) +{ + struct bencode_bool *b = alloc(BENCODE_BOOL); + if (b == NULL) + return NULL; + b->b = boolean ? 1 : 0; + return (struct bencode *) b; +} + +struct bencode *ben_dict(void) +{ + return alloc(BENCODE_DICT); +} + +struct bencode *ben_dict_get(const struct bencode *dict, const struct bencode *key) +{ + const struct bencode_dict *d = ben_dict_const_cast(dict); + long long hash = ben_hash(key); + size_t pos = hash_bucket_head(hash, d); + while (pos != -1) { + assert(pos < d->n); + if (d->nodes[pos].hash == hash && + ben_cmp(d->nodes[pos].key, key) == 0) + return d->nodes[pos].value; + pos = d->nodes[pos].next; + } + return NULL; +} + +/* + * Note, we do not re-allocate memory, so one may not call ben_free for these + * instances. These are only used to optimize speed. + */ +static void inplace_ben_str(struct bencode_str *b, const char *s, size_t len) +{ + b->type = BENCODE_STR; + b->len = len; + b->s = (char *) s; +} + +static void inplace_ben_int(struct bencode_int *i, long long ll) +{ + i->type = BENCODE_INT; + i->ll = ll; +} + +struct bencode *ben_dict_get_by_str(const struct bencode *dict, const char *key) +{ + struct bencode_str s; + inplace_ben_str(&s, key, strlen(key)); + return ben_dict_get(dict, (struct bencode *) &s); +} + +struct bencode *ben_dict_get_by_int(const struct bencode *dict, long long key) +{ + struct bencode_int i; + inplace_ben_int(&i, key); + return ben_dict_get(dict, (struct bencode *) &i); +} + +struct bencode_keyvalue *ben_dict_ordered_items(const struct bencode *b) +{ + struct bencode_keyvalue *pairs; + size_t i; + const struct bencode_dict *dict = ben_dict_const_cast(b); + if (dict == NULL) + return NULL; + pairs = malloc(dict->n * sizeof(pairs[0])); + if (pairs == NULL) + return NULL; + for (i = 0; i < dict->n; i++) { + pairs[i].key = dict->nodes[i].key; + pairs[i].value = dict->nodes[i].value; + } + qsort(pairs, dict->n, sizeof(pairs[0]), ben_cmp_qsort); + return pairs; +} + +static size_t dict_find_pos(struct bencode_dict *d, + const struct bencode *key, long long hash) +{ + size_t pos = hash_bucket_head(hash, d); + while (pos != -1) { + assert(pos < d->n); + if (d->nodes[pos].hash == hash && + ben_cmp(d->nodes[pos].key, key) == 0) + break; + pos = d->nodes[pos].next; + } + return pos; +} + +static void dict_unlink(struct bencode_dict *d, size_t bucket, size_t unlinkpos) +{ + size_t pos = d->buckets[bucket]; + size_t next; + size_t nextnext; + + assert(unlinkpos < d->n); + + if (pos == unlinkpos) { + next = d->nodes[unlinkpos].next; + assert(next < d->n || next == -1); + d->buckets[bucket] = next; + return; + } + while (pos != -1) { + assert(pos < d->n); + next = d->nodes[pos].next; + if (next == unlinkpos) { + nextnext = d->nodes[next].next; + assert(nextnext < d->n || nextnext == -1); + d->nodes[pos].next = nextnext; + return; + } + pos = next; + } + die("Key should have been found. 
Can not unlink position %zu.\n", unlinkpos); +} + +/* Remove node from the linked list, if found */ +static struct bencode *dict_pop(struct bencode_dict *d, + const struct bencode *key, long long hash) +{ + struct bencode *value; + size_t removebucket = hash_bucket(hash, d); + size_t tailpos = d->n - 1; + size_t tailhash = d->nodes[tailpos].hash; + size_t tailbucket = hash_bucket(tailhash, d); + size_t removepos; + + removepos = dict_find_pos(d, key, hash); + if (removepos == -1) + return NULL; + key = NULL; /* avoid using the pointer again, it may not be valid */ + + /* + * WARNING: complicated code follows. + * + * First, unlink the node to be removed and the tail node. + * We will actually later swap the positions of removed node and + * tail node inside the d->nodes array. We want to preserve + * d->nodes array in a state where positions from 0 to (d->n - 1) + * are always occupied with a valid node. This is done to make + * dictionary walk fast by simply walking positions 0 to (d->n - 1) + * in a for loop. + */ + dict_unlink(d, removebucket, removepos); + if (removepos != tailpos) + dict_unlink(d, tailbucket, tailpos); + + /* Then read the removed node and free its key */ + value = d->nodes[removepos].value; + ben_free(d->nodes[removepos].key); + + /* Then re-insert the unliked tail node in the place of removed node */ + d->nodes[removepos] = d->nodes[tailpos]; + memset(&d->nodes[tailpos], 0, sizeof d->nodes[tailpos]); /* poison */ + d->nodes[tailpos].next = ((size_t) -1) / 2; + + /* + * Then re-link the tail node to its bucket, unless the tail node + * was the one to be removed. + */ + if (removepos != tailpos) { + d->nodes[removepos].next = d->buckets[tailbucket]; + d->buckets[tailbucket] = removepos; + } + + d->n--; + + if (d->n <= (d->alloc / 4) && d->alloc >= 8) + resize_dict(d, d->alloc / 2); + + return value; +} + +struct bencode *ben_dict_pop(struct bencode *dict, const struct bencode *key) +{ + struct bencode_dict *d = ben_dict_cast(dict); + return dict_pop(d, key, ben_hash(key)); +} + +struct bencode *ben_dict_pop_by_str(struct bencode *dict, const char *key) +{ + struct bencode_str s; + inplace_ben_str(&s, key, strlen(key)); + return ben_dict_pop(dict, (struct bencode *) &s); +} + +struct bencode *ben_dict_pop_by_int(struct bencode *dict, long long key) +{ + struct bencode_int i; + inplace_ben_int(&i, key); + return ben_dict_pop(dict, (struct bencode *) &i); +} + +/* This can be used from the ben_dict_for_each() iterator */ +struct bencode *ben_dict_pop_current(struct bencode *dict, size_t *pos) +{ + struct bencode_dict *d = ben_dict_cast(dict); + struct bencode *value = ben_dict_pop(dict, d->nodes[*pos].key); + (*pos)--; + return value; +} + +int ben_dict_set(struct bencode *dict, struct bencode *key, struct bencode *value) +{ + struct bencode_dict *d = ben_dict_cast(dict); + long long hash = ben_hash(key); + size_t bucket; + size_t pos; + + assert(value != NULL); + + pos = hash_bucket_head(hash, d); + for (; pos != -1; pos = d->nodes[pos].next) { + assert(pos < d->n); + if (d->nodes[pos].hash != hash || ben_cmp(d->nodes[pos].key, key) != 0) + continue; + ben_free(d->nodes[pos].key); + ben_free(d->nodes[pos].value); + d->nodes[pos].key = key; + d->nodes[pos].value = value; + /* 'hash' and 'next' members stay the same */ + return 0; + } + + assert(d->n <= d->alloc); + if (d->n == d->alloc && resize_dict(d, -1)) + return -1; + + bucket = hash_bucket(hash, d); + pos = d->n; + d->nodes[pos] = (struct bencode_dict_node) {.hash = hash, + .key = key, + .value = value, + .next = 
d->buckets[bucket]}; + d->n++; + d->buckets[bucket] = pos; + return 0; +} + +int ben_dict_set_by_str(struct bencode *dict, const char *key, struct bencode *value) +{ + struct bencode *bkey = ben_str(key); + if (bkey == NULL) + return -1; + if (ben_dict_set(dict, bkey, value)) { + ben_free(bkey); + return -1; + } + return 0; +} + +int ben_dict_set_str_by_str(struct bencode *dict, const char *key, const char *value) +{ + struct bencode *bkey = ben_str(key); + struct bencode *bvalue = ben_str(value); + if (bkey == NULL || bvalue == NULL) { + ben_free(bkey); + ben_free(bvalue); + return -1; + } + if (ben_dict_set(dict, bkey, bvalue)) { + ben_free(bkey); + ben_free(bvalue); + return -1; + } + return 0; +} + +struct bencode *ben_int(long long ll) +{ + struct bencode_int *b = alloc(BENCODE_INT); + if (b == NULL) + return NULL; + b->ll = ll; + return (struct bencode *) b; +} + +struct bencode *ben_list(void) +{ + return alloc(BENCODE_LIST); +} + +int ben_list_append(struct bencode *list, struct bencode *b) +{ + struct bencode_list *l = ben_list_cast(list); + /* NULL pointer de-reference if the cast fails */ + assert(l->n <= l->alloc); + if (l->n == l->alloc && resize_list(l, -1)) + return -1; + assert(b != NULL); + l->values[l->n] = b; + l->n++; + return 0; +} + +int ben_list_append_str(struct bencode *list, const char *s) +{ + struct bencode *bs = ben_str(s); + if (bs == NULL) + return -1; + return ben_list_append(list, bs); +} + +int ben_list_append_int(struct bencode *list, long long ll) +{ + struct bencode *bll = ben_int(ll); + if (bll == NULL) + return -1; + return ben_list_append(list, bll); +} + +struct bencode *ben_list_pop(struct bencode *list, size_t pos) +{ + struct bencode_list *l = ben_list_cast(list); + struct bencode *value; + + assert(pos < l->n); + + value = ben_list_get(list, pos); + + for (; (pos + 1) < l->n; pos++) + l->values[pos] = l->values[pos + 1]; + + l->values[l->n - 1] = NULL; + l->n--; + return value; +} + +void ben_list_set(struct bencode *list, size_t i, struct bencode *b) +{ + struct bencode_list *l = ben_list_cast(list); + if (i >= l->n) + die("ben_list_set() out of bounds: %zu\n", i); + + ben_free(l->values[i]); + assert(b != NULL); + l->values[i] = b; +} + +char *ben_print(const struct bencode *b) +{ + size_t size = get_printed_size(b); + char *data = malloc(size + 1); + struct ben_encode_ctx ctx = {.data = data, .size = size, .pos = 0}; + if (data == NULL) { + //warn("No memory to print\n"); + return NULL; + } + if (print(&ctx, b)) { + free(data); + return NULL; + } + assert(ctx.pos == size); + data[ctx.pos] = 0; + return data; +} + +struct bencode *ben_str(const char *s) +{ + return ben_blob(s, strlen(s)); +} + +const char *ben_strerror(int error) +{ + switch (error) { + case BEN_OK: + return "OK (no error)"; + case BEN_INVALID: + return "Invalid data"; + case BEN_INSUFFICIENT: + return "Insufficient amount of data (need more data)"; + case BEN_NO_MEMORY: + return "Out of memory"; + case BEN_MISMATCH: + return "A given structure did not match unpack format"; + default: + fprintf(stderr, "Unknown error code: %d\n", error); + return NULL; + } +} + +static int unpack_pointer(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + const char **str; + const struct bencode **ptr; + + ctx->off++; + + if (ctx->off >= ctx->len) + return insufficient(ctx); + + switch (ben_current_char(ctx)) { + case 's': /* %ps */ + ctx->off++; + if (b->type != BENCODE_STR) + return mismatch(ctx); + str = va_arg(*vl, const char **); + *str = ben_str_val(b); + return 0; + 
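+	/*
+	 * Illustrative note (not part of the original sources): "%ps" above
+	 * borrows the string owned by the bencode value, so the caller must
+	 * not free it and must keep the container alive, e.g.
+	 *
+	 *   const char *name;
+	 *   if (ben_unpack(b, "{'name': %ps}", &name) == 0)
+	 *           printf("%s\n", name);
+	 *
+	 * "%pb" below hands back a pointer to the bencode object itself.
+	 */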
+ case 'b': /* %pb */ + ctx->off++; + ptr = va_arg(*vl, const struct bencode **); + *ptr = b; + return 0; + + default: + return invalid(ctx); + } +} + +static int unpack_value(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + long long val; + long long *ll; + long *l; + int *i; + unsigned long long *ull; + unsigned long *ul; + unsigned int *ui; + int longflag = 0; + + ctx->off++; + + while (ctx->off < ctx->len) { + switch (ben_current_char(ctx)) { + case 'l': + ctx->off++; + longflag++; + break; + case 'L': + case 'q': + ctx->off++; + longflag = 2; + break; + + case 'p': + return unpack_pointer(b, ctx, vl); + + /* signed */ + case 'd': + ctx->off++; + if (b->type != BENCODE_INT) + return mismatch(ctx); + val = ben_int_val(b); + switch (longflag) { + case 0: + i = va_arg(*vl, int *); + *i = val; + /* Test that no information was lost in conversion */ + if ((long long) *i != val) + return mismatch(ctx); + break; + case 1: + l = va_arg(*vl, long *); + *l = val; + if ((long long) *l != val) + return mismatch(ctx); + break; + case 2: + ll = va_arg(*vl, long long *); + *ll = val; + break; + } + return 0; + + /* unsigned */ + case 'u': + ctx->off++; + if (b->type != BENCODE_INT) + return mismatch(ctx); + val = ben_int_val(b); + if (val < 0) + return mismatch(ctx); + switch (longflag) { + case 0: + ui = va_arg(*vl, unsigned int *); + *ui = val; + if ((long long) *ui != val) + return mismatch(ctx); + break; + case 1: + ul = va_arg(*vl, unsigned long *); + *ul = val; + if ((long long) *ul != val) + return mismatch(ctx); + break; + case 2: + ull = va_arg(*vl, unsigned long long *); + *ull = val; + break; + } + return 0; + + default: + return invalid(ctx); + } + } + return insufficient(ctx); +} + +static int unpack_dict(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + struct bencode *key = NULL; + const struct bencode *val; + + if (b->type != BENCODE_DICT) + return mismatch(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + return -1; + + if (ben_current_char(ctx) == '}') { + ctx->off++; + break; + } + switch (ben_current_char(ctx)) { + case '\'': + case '"': + key = decode_printed_str(ctx); + break; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + key = decode_printed_int(ctx); + break; + default: + return invalid(ctx); + } + if (key == NULL) + return -1; + val = ben_dict_get(b, key); + ben_free(key); + if (val == NULL) + return mismatch(ctx); + + if (seek_char(ctx)) + return -1; + if (ben_current_char(ctx) != ':') + return invalid(ctx); + ctx->off++; + + if (unpack(val, ctx, vl)) + return -1; + + if (seek_char(ctx)) + return -1; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != '}') + return invalid(ctx); + } + return 0; +} + +static int unpack_list(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + const struct bencode_list *list; + size_t i = 0; + + if (b->type != BENCODE_LIST) + return mismatch(ctx); + list = ben_list_const_cast(b); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + return -1; + + if (ben_current_char(ctx) == ']') { + ctx->off++; + break; + } + if (i >= list->n) + return mismatch(ctx); + if (unpack(list->values[i], ctx, vl)) + return -1; + i++; + + if (seek_char(ctx)) + return -1; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != ']') + return invalid(ctx); + } + if (i != list->n) + return mismatch(ctx); + return 0; +} + +static int 
unpack(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + if (seek_char(ctx)) + return insufficient(ctx); + + switch (ben_current_char(ctx)) { + case '{': + return unpack_dict(b, ctx, vl); + case '[': + return unpack_list(b, ctx, vl); + case '%': + return unpack_value(b, ctx, vl); + default: + break; + } + return -1; +} + +static int unpack_all(const struct bencode *b, struct ben_decode_ctx *ctx, + va_list *vl) +{ + if (unpack(b, ctx, vl)) + return -1; + /* check for left over characters */ + seek_char(ctx); + ctx->error = 0; + if (ctx->off < ctx->len) + return invalid(ctx); + return 0; +} + +int ben_unpack(const struct bencode *b, const char *fmt, ...) +{ + struct ben_decode_ctx ctx = {.data = fmt, .len = strlen(fmt)}; + int ret; + va_list vl; + va_start(vl, fmt); + ret = unpack_all(b, &ctx, &vl); + va_end(vl); + return ret; +} + +int ben_unpack2(const struct bencode *b, size_t *off, struct bencode_error *error, const char *fmt, ...) +{ + struct ben_decode_ctx ctx = {.data = fmt, .len = strlen(fmt)}; + int ret; + va_list vl; + va_start(vl, fmt); + ret = unpack_all(b, &ctx, &vl); + va_end(vl); + + *off = ctx.off; + if (error != NULL) { + assert((ret == 0) ^ (ctx.error != 0)); + error->error = ctx.error; + if (ret != 0) { + error->off = 0; + error->line = 0; + } else { + error->off = ctx.off; + error->line = ctx.line; + } + } + return 0; +} + +static struct bencode *pack_pointer(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *b = NULL; + + ctx->off++; + + if (ctx->off >= ctx->len) + return ben_insufficient_ptr(ctx); + + switch (ben_current_char(ctx)) { + case 'b': /* %pb */ + ctx->off++; + b = va_arg(*vl, struct bencode *); + break; + default: + return ben_invalid_ptr(ctx); + } + return b; +} + +static struct bencode *pack_value(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *b = NULL; + unsigned long long ull; + long long val; + int longflag = 0; + + ctx->off++; + + while (ctx->off < ctx->len) { + switch (ben_current_char(ctx)) { + case 'l': + ctx->off++; + longflag++; + break; + case 'L': + case 'q': + ctx->off++; + longflag = 2; + break; + + case 's': + ctx->off++; + b = ben_str(va_arg(*vl, const char *)); + if (b == NULL) + return ben_oom_ptr(ctx); + break; + + case 'p': + b = pack_pointer(ctx, vl); + break; + + /* signed */ + case 'd': + ctx->off++; + switch (longflag) { + case 0: + val = va_arg(*vl, int); + break; + case 1: + val = va_arg(*vl, long); + break; + case 2: + val = va_arg(*vl, long long); + break; + default: + return ben_invalid_ptr(ctx); + } + b = ben_int(val); + if (b == NULL) + return ben_oom_ptr(ctx); + break; + + /* unsigned */ + case 'u': + ctx->off++; + switch (longflag) { + case 0: + val = va_arg(*vl, unsigned int); + break; + case 1: + val = va_arg(*vl, unsigned long); + break; + case 2: + ull = va_arg(*vl, unsigned long long); + /* Check that no information was lost */ + val = ull; + if ((long long) ull != val) + return ben_invalid_ptr(ctx); + break; + default: + return ben_invalid_ptr(ctx); + } + b = ben_int(val); + if (b == NULL) + return ben_oom_ptr(ctx); + break; + + default: + return ben_invalid_ptr(ctx); + } + if (b) + return b; + } + return ben_insufficient_ptr(ctx); +} + +static struct bencode *pack_dict(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *d = ben_dict(); + struct bencode *key = NULL; + struct bencode *value = NULL; + + if (d == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + + if (ben_current_char(ctx) == '}') { + 
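+			/* '}' closes the dict template: consume it and return the dict built so far */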
ctx->off++; + break; + } + key = pack(ctx, vl); + if (key == NULL) + goto nullpath; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) != ':') + goto invalidpath; + ctx->off++; + + value = pack(ctx, vl); + if (value == NULL) + goto nullpath; + + if (ben_dict_set(d, key, value)) { + ben_free(key); + ben_free(value); + ben_free(d); + return ben_oom_ptr(ctx); + } + key = NULL; + value = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != '}') + goto invalidpath; + } + return d; + +nullpath: + ben_free(d); + ben_free(key); + ben_free(value); + return NULL; + +invalidpath: + ben_free(d); + ben_free(key); + ben_free(value); + return ben_invalid_ptr(ctx); +} + +static struct bencode *pack_list(struct ben_decode_ctx *ctx, va_list *vl) +{ + struct bencode *l = ben_list(); + struct bencode *val = NULL; + + if (l == NULL) + return ben_oom_ptr(ctx); + + ctx->off++; + + while (1) { + if (seek_char(ctx)) + goto nullpath; + + if (ben_current_char(ctx) == ']') { + ctx->off++; + break; + } + val = pack(ctx, vl); + if (val == NULL) + goto nullpath; + + if (ben_list_append(l, val)) { + ben_free(val); + ben_free(l); + return ben_oom_ptr(ctx); + } + val = NULL; + + if (seek_char(ctx)) + goto nullpath; + if (ben_current_char(ctx) == ',') + ctx->off++; + else if (ben_current_char(ctx) != ']') { + ben_free(l); + return ben_invalid_ptr(ctx); + } + } + + return l; + +nullpath: + ben_free(l); + ben_free(val); + return NULL; +} + +static struct bencode *pack(struct ben_decode_ctx *ctx, va_list *vl) +{ + if (seek_char(ctx)) + return ben_insufficient_ptr(ctx); + + switch (ben_current_char(ctx)) { + case '\'': + case '"': + return decode_printed_str(ctx); + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + return decode_printed_int(ctx); + case 'F': + case 'T': + return decode_printed_bool(ctx); + case '{': + return pack_dict(ctx, vl); + case '[': + return pack_list(ctx, vl); + case '%': + return pack_value(ctx, vl); + default: + return ben_invalid_ptr(ctx); + } + return NULL; +} + +struct bencode *ben_pack(const char *fmt, ...) 
+{ + struct ben_decode_ctx ctx = {.data = fmt, .len = strlen(fmt)}; + struct bencode *b; + va_list vl; + va_start(vl, fmt); + b = pack(&ctx, &vl); + va_end(vl); + + /* check for left over characters */ + seek_char(&ctx); + if (ctx.off < ctx.len) { + ben_free(b); + return NULL; + } + return b; +} diff --git a/libs/libks/src/ks_buffer.c b/libs/libks/src/ks_buffer.c index b78584e984..1f5254c40d 100644 --- a/libs/libks/src/ks_buffer.c +++ b/libs/libks/src/ks_buffer.c @@ -32,7 +32,7 @@ */ -#include "ks_buffer.h" +#include "ks.h" static unsigned buffer_id = 0; @@ -61,7 +61,7 @@ KS_DECLARE(ks_status_t) ks_buffer_create(ks_buffer_t **buffer, ks_size_t blocksi new_buffer->data = malloc(start_len); if (!new_buffer->data) { free(new_buffer); - return KS_FAIL; + return KS_STATUS_FAIL; } memset(new_buffer->data, 0, start_len); } @@ -73,10 +73,10 @@ KS_DECLARE(ks_status_t) ks_buffer_create(ks_buffer_t **buffer, ks_size_t blocksi new_buffer->head = new_buffer->data; *buffer = new_buffer; - return KS_SUCCESS; + return KS_STATUS_SUCCESS; } - return KS_FAIL; + return KS_STATUS_FAIL; } KS_DECLARE(ks_size_t) ks_buffer_len(ks_buffer_t *buffer) @@ -163,7 +163,7 @@ KS_DECLARE(ks_size_t) ks_buffer_read_loop(ks_buffer_t *buffer, void *data, ks_si } buffer->head = buffer->data; buffer->used = buffer->actually_used; - len = ks_buffer_read(buffer, (char*)data + len, datalen - len); + len = ks_buffer_read(buffer, (char *) data + len, datalen - len); buffer->loops--; } return len; @@ -199,22 +199,23 @@ KS_DECLARE(ks_size_t) ks_buffer_packet_count(ks_buffer_t *buffer) { char *pe, *p, *e, *head = (char *) buffer->head; ks_size_t x = 0; - + ks_assert(buffer != NULL); e = (head + buffer->used); for (p = head; p && *p && p < e; p++) { if (*p == '\n') { - pe = p+1; - if (*pe == '\r') pe++; + pe = p + 1; + if (*pe == '\r') + pe++; if (pe <= e && *pe == '\n') { p = pe++; x++; } } } - + return x; } @@ -230,8 +231,9 @@ KS_DECLARE(ks_size_t) ks_buffer_read_packet(ks_buffer_t *buffer, void *data, ks_ for (p = head; p && *p && p < e; p++) { if (*p == '\n') { - pe = p+1; - if (*pe == '\r') pe++; + pe = p + 1; + if (*pe == '\r') + pe++; if (pe <= e && *pe == '\n') { pe++; datalen = pe - head; @@ -242,7 +244,7 @@ KS_DECLARE(ks_size_t) ks_buffer_read_packet(ks_buffer_t *buffer, void *data, ks_ } } } - + return ks_buffer_read(buffer, data, datalen); } @@ -268,16 +270,16 @@ KS_DECLARE(ks_size_t) ks_buffer_write(ks_buffer_t *buffer, const void *data, ks_ freespace = buffer->datalen - buffer->used; /* - if (buffer->data != buffer->head) { - memmove(buffer->data, buffer->head, buffer->used); - buffer->head = buffer->data; - } - */ - + if (buffer->data != buffer->head) { + memmove(buffer->data, buffer->head, buffer->used); + buffer->head = buffer->data; + } + */ + if (freespace < datalen) { ks_size_t new_size, new_block_size; void *data1; - + new_size = buffer->datalen + datalen; new_block_size = buffer->datalen + buffer->blocksize; @@ -293,7 +295,7 @@ KS_DECLARE(ks_size_t) ks_buffer_write(ks_buffer_t *buffer, const void *data, ks_ buffer->head = buffer->data; buffer->datalen = new_size; } - + freespace = buffer->datalen - buffer->used; @@ -322,7 +324,7 @@ KS_DECLARE(void) ks_buffer_zero(ks_buffer_t *buffer) KS_DECLARE(ks_size_t) ks_buffer_zwrite(ks_buffer_t *buffer, const void *data, ks_size_t datalen) { ks_size_t w; - + if (!(w = ks_buffer_write(buffer, data, datalen))) { ks_buffer_zero(buffer); return ks_buffer_write(buffer, data, datalen); diff --git a/libs/libks/src/ks_config.c b/libs/libks/src/ks_config.c index 3b25dd1634..dbde56ff73 
100644 --- a/libs/libks/src/ks_config.c +++ b/libs/libks/src/ks_config.c @@ -32,7 +32,6 @@ */ #include "ks.h" -#include "ks_config.h" KS_DECLARE(int) ks_config_open_file(ks_config_t *cfg, const char *file_path) { @@ -157,7 +156,7 @@ KS_DECLARE(int) ks_config_next_pair(ks_config_t *cfg, char **var, char **val) } - if ((end = strchr(*var, ';')) && *(end+1) == *end) { + if ((end = strchr(*var, ';')) && *(end + 1) == *end) { *end = '\0'; end--; } else if ((end = strchr(*var, '\n')) != 0) { diff --git a/libs/libks/src/ks_dht.c b/libs/libks/src/ks_dht.c new file mode 100644 index 0000000000..31ad823b89 --- /dev/null +++ b/libs/libks/src/ks_dht.c @@ -0,0 +1,4137 @@ +/* +Copyright (c) 2009-2011 by Juliusz Chroboczek + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +/* Please, please, please. + + You are welcome to integrate this code in your favourite Bittorrent + client. Please remember, however, that it is meant to be usable by + others, including myself. This means no C++, no relicensing, and no + gratuitious changes to the coding style. And please send back any + improvements to the author. */ + +/* Sorry dude, we hacked up this code pretty good but its not C++ and the license is pure BSD. +Like Meatloaf says, 2 out of 3 ain't bad! But we needed a good base for some additions we needed */ + + +#include "ks.h" +#include "sodium.h" + +#ifndef MSG_CONFIRM +#define MSG_CONFIRM 0 +#endif + +KS_DECLARE(int) dht_blacklisted(const ks_sockaddr_t *sa) +{ + return 0; +} + +KS_DECLARE(void) dht_hash(void *hash_return, int hash_size, const void *v1, int len1, const void *v2, int len2, const void *v3, int len3) +{ + crypto_generichash_state state; + + crypto_generichash_init(&state, NULL, 0, hash_size); + crypto_generichash_update(&state, v1, len1); + crypto_generichash_update(&state, v2, len2); + crypto_generichash_update(&state, v3, len3); + crypto_generichash_final(&state, (unsigned char *)hash_return, hash_size); + + return; +} + +/* +KS_DECLARE(int) dht_random_bytes(void *buf, size_t size) +{ +return 0; +} +*/ + +#ifdef _WIN32 + +#undef EAFNOSUPPORT +#define EAFNOSUPPORT WSAEAFNOSUPPORT + +static int random(void) +{ + return rand(); +} + +/* Windows Vista and later already provide the implementation. */ +#if _WIN32_WINNT < 0x0600 +extern const char *inet_ntop(int, const void *, char *, socklen_t); +#endif + +#else +#endif + +/* We set sin_family to 0 to mark unused slots. 
*/ +#if AF_INET == 0 || AF_INET6 == 0 +#error You lose +#endif + +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* nothing */ +#elif defined(__GNUC__) +#define inline __inline +#if (__GNUC__ >= 3) +#define restrict __restrict +#else +#define restrict /**/ +#endif +#else +#define inline /**/ +#define restrict /**/ +#endif + +#define MAX(x, y) ((x) >= (y) ? (x) : (y)) +#define MIN(x, y) ((x) <= (y) ? (x) : (y)) + +struct node { + unsigned char id[20]; + ks_sockaddr_t ss; + time_t time; /* time of last message received */ + time_t reply_time; /* time of last correct reply received */ + time_t pinged_time; /* time of last request */ + int pinged; /* how many requests we sent since last reply */ + struct node *next; +}; + +struct bucket { + int af; + unsigned char first[20]; + int count; /* number of nodes */ + time_t time; /* time of last reply in this bucket */ + struct node *nodes; + ks_sockaddr_t cached; /* the address of a likely candidate */ + struct bucket *next; +}; + +struct search_node { + unsigned char id[20]; + ks_sockaddr_t ss; + time_t request_time; /* the time of the last unanswered request */ + time_t reply_time; /* the time of the last reply */ + int pinged; + unsigned char token[40]; + int token_len; + int replied; /* whether we have received a reply */ + int acked; /* whether they acked our announcement */ +}; + +/* When performing a search, we search for up to SEARCH_NODES closest nodes + to the destination, and use the additional ones to backtrack if any of + the target 8 turn out to be dead. */ +#define SEARCH_NODES 14 + +struct search { + unsigned short tid; + int af; + time_t step_time; /* the time of the last search_step */ + unsigned char id[20]; + unsigned short port; /* 0 for pure searches */ + int done; + struct search_node nodes[SEARCH_NODES]; + int numnodes; + struct search *next; +}; + +struct peer { + time_t time; + ks_sockaddr_t addr; +}; + +/* The maximum number of peers we store for a given hash. */ +#ifndef DHT_MAX_PEERS +#define DHT_MAX_PEERS 2048 +#endif + +/* The maximum number of hashes we're willing to track. */ +#ifndef DHT_MAX_HASHES +#define DHT_MAX_HASHES 16384 +#endif + +/* The maximum number of searches we keep data about. */ +#ifndef DHT_MAX_SEARCHES +#define DHT_MAX_SEARCHES 1024 +#endif + +/* The time after which we consider a search to be expirable. 
*/ +#ifndef DHT_SEARCH_EXPIRE_TIME +#define DHT_SEARCH_EXPIRE_TIME (62 * 60) +#endif + +struct storage { + unsigned char id[20]; + int numpeers, maxpeers; + struct peer *peers; + struct storage *next; +}; + +static struct storage * find_storage(dht_handle_t *h, const unsigned char *id); +static void flush_search_node(struct search_node *n, struct search *sr); + +typedef enum { + DHT_MSG_INVALID = 0, + DHT_MSG_ERROR = 1, + DHT_MSG_REPLY = 2, + DHT_MSG_PING = 3, + DHT_MSG_FIND_NODE = 4, + DHT_MSG_GET_PEERS = 5, + DHT_MSG_ANNOUNCE_PEER = 6, + DHT_MSG_STORE_PUT = 7 +} dht_msg_type_t; + +#define WANT4 1 +#define WANT6 2 + +static dht_msg_type_t parse_message(struct bencode *bencode_p, + unsigned char *tid_return, int *tid_len, + unsigned char *id_return); + +static unsigned char *debug_printable(const unsigned char *buf, unsigned char *out, int buflen); +static void print_hex(FILE *f, const unsigned char *buf, int buflen); +static int is_martian(const ks_sockaddr_t *sa); +static int id_cmp(const unsigned char *restrict id1, const unsigned char *restrict id2); +static int lowbit(const unsigned char *id); +static int common_bits(const unsigned char *id1, const unsigned char *id2); +static int xorcmp(const unsigned char *id1, const unsigned char *id2, const unsigned char *ref); +static int in_bucket(const unsigned char *id, struct bucket *b); +static struct bucket *find_bucket(dht_handle_t *h, unsigned const char *id, int af); +static struct bucket *previous_bucket(dht_handle_t *h, struct bucket *b); +static struct node *find_node(dht_handle_t *h, const unsigned char *id, int af); +static struct node *random_node(struct bucket *b); +static int bucket_middle(struct bucket *b, unsigned char *id_return); +static int bucket_random(struct bucket *b, unsigned char *id_return); +static struct node *insert_node(dht_handle_t *h, struct node *node); +static int node_good(dht_handle_t *h, struct node *node); +static void make_tid(unsigned char *tid_return, const char *prefix, unsigned short seqno); +static int tid_match(const unsigned char *tid, const char *prefix, unsigned short *seqno_return); +static int send_cached_ping(dht_handle_t *h, struct bucket *b); +static void pinged(dht_handle_t *h, struct node *n, struct bucket *b); +static void blacklist_node(dht_handle_t *h, const unsigned char *id, const ks_sockaddr_t *sa); +static int node_blacklisted(dht_handle_t *h, const ks_sockaddr_t *sa); +static struct bucket *split_bucket(dht_handle_t *h, struct bucket *b); +static struct node *new_node(dht_handle_t *h, const unsigned char *id, const ks_sockaddr_t *sa, int confirm); +static int expire_buckets(dht_handle_t *h, struct bucket *b); +static struct search *find_search(dht_handle_t *h, unsigned short tid, int af); +static int insert_search_node(dht_handle_t *h, unsigned char *id, + const ks_sockaddr_t *sa, + struct search *sr, int replied, + unsigned char *token, int token_len); +static void flush_search_node(struct search_node *n, struct search *sr); +static void expire_searches(dht_handle_t *h); +static int search_send_get_peers(dht_handle_t *h, struct search *sr, struct search_node *n); +static void search_step(dht_handle_t *h, struct search *sr); +static struct search *new_search(dht_handle_t *h); +static void insert_search_bucket(dht_handle_t *h, struct bucket *b, struct search *sr); +static struct storage *find_storage(dht_handle_t *h, const unsigned char *id); +static int storage_store(dht_handle_t *h, const unsigned char *id, const ks_sockaddr_t *sa, unsigned short port); +static int 
expire_storage(dht_handle_t *h); +static int rotate_secrets(dht_handle_t *h); +static void make_token(dht_handle_t *h, const ks_sockaddr_t *sa, int old, unsigned char *token_return); +static int token_match(dht_handle_t *h, const unsigned char *token, int token_len, const ks_sockaddr_t *sa); +static void dump_bucket(dht_handle_t *h, FILE *f, struct bucket *b); + +static void reset_poll(dht_handle_t *h); +static void clear_all_ip(dht_handle_t *h); +static int token_bucket(dht_handle_t *h); +static int neighbourhood_maintenance(dht_handle_t *h, int af); +static int bucket_maintenance(dht_handle_t *h, int af); +static int dht_send(dht_handle_t *h, const void *buf, size_t len, int flags, const ks_sockaddr_t *sa); + +static int send_ping(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len); +static int send_pong(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len); +static int send_find_node(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len, const unsigned char *target, int target_len, + int want, int confirm); +static int send_nodes_peers(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len, const unsigned char *nodes, int nodes_len, + const unsigned char *nodes6, int nodes6_len, int af, struct storage *st, const unsigned char *token, int token_len); +static int insert_closest_node(unsigned char *nodes, int numnodes, const unsigned char *id, struct node *n); +static int buffer_closest_nodes(dht_handle_t *h, unsigned char *nodes, int numnodes, const unsigned char *id, struct bucket *b); +static int send_closest_nodes(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len, const unsigned char *id, int want, + int af, struct storage *st, const unsigned char *token, int token_len); +static int send_get_peers(dht_handle_t *h, const ks_sockaddr_t *sa, unsigned char *tid, int tid_len, unsigned char *infohash, int want, int confirm); +static int send_announce_peer(dht_handle_t *h, const ks_sockaddr_t *sa, unsigned char *tid, int tid_len, unsigned char *infohas, unsigned short port, + unsigned char *token, int token_len, int confirm); +static int send_peer_announced(dht_handle_t *h, const ks_sockaddr_t *sa, unsigned char *tid, int tid_len); +static int send_error(dht_handle_t *h, const ks_sockaddr_t *sa, unsigned char *tid, int tid_len, int code, const char *message); + +static dht_msg_type_t parse_message(struct bencode *bencode_p, unsigned char *tid_return, int *tid_len, unsigned char *id_return); +static int b64encode(unsigned char *in, ks_size_t ilen, unsigned char *out, ks_size_t olen); + + +static const unsigned char zeroes[20] = {0}; +//static const unsigned char v4prefix[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0, 0, 0, 0 }; + +#define MAX_TOKEN_BUCKET_TOKENS 400 + +/* The maximum number of nodes that we snub. There is probably little + reason to increase this value. */ +#ifndef DHT_MAX_BLACKLISTED +#define DHT_MAX_BLACKLISTED 10 +#endif + +struct ks_dht_store_entry_s { + const char *key; + + ks_time_t received; /* recieved timestamp */ + ks_time_t last_announce; + ks_time_t expiration; /* When should 'my' message be automatically expired. If not set will be expired after 10 minutes */ + + /* Top level struct pointers. Will need to be freed */ + struct bencode *bencode_message_raw; + struct bencode *payload_bencode; + cJSON *body; + + /* Short cut accessor pointers. Do not free these. 
*/ + const char *content_type; + const char *payload_raw; + + unsigned int serial; + ks_bool_t mine; + ks_pool_t *pool; +}; + +struct ks_dht_store_s { + ks_time_t next_expiring; + ks_hash_t *hash; + ks_pool_t *pool; +}; + +typedef struct { + char ip[80]; + ks_sockaddr_t addr; + ks_socket_t sock; +} ks_ip_t; + +struct dht_handle_s { + ks_pool_t *pool; + + struct pollfd *pollsocks; + ks_ip_t **iptsocks; + ks_sockaddr_t **addrsocks; + uint32_t num_pollsocks; + + //int dht_socket; + //int dht_socket6; + + time_t search_time; + time_t confirm_nodes_time; + time_t rotate_secrets_time; + + unsigned char myid[20]; + int have_v; + unsigned char my_v[9]; + unsigned char secret[8]; + unsigned char oldsecret[8]; + unsigned int port; + + struct bucket *buckets; + struct bucket *buckets6; + struct storage *storage; + int numstorage; + + struct search *searches; + int numsearches; + unsigned short search_id; + + ks_sockaddr_t blacklist[DHT_MAX_BLACKLISTED]; + int next_blacklisted; + + ks_time_t now; + time_t mybucket_grow_time, mybucket6_grow_time; + time_t expire_stuff_time; + + time_t token_bucket_time; + int token_bucket_tokens; + + ks_dht_store_entry_json_cb *store_json_cb; + void *store_json_cb_arg; + + ks_hash_t *iphash; + + struct ks_dht_store_s *store; + + dht_callback_t callback; + void *closure; + + uint32_t ip4s; + uint32_t ip6s; + + int af_flags; + + int tosleep; + + int autoroute; + + int started; +}; + +static ks_ip_t *add_ip(dht_handle_t *h, const char *ip, int port, int family); + +static void ks_dht_store_entry_destroy(struct ks_dht_store_entry_s **old_entry); +static int ks_dht_store_entry_create(struct dht_handle_s *h, struct bencode *msg, struct ks_dht_store_entry_s **new_entry, ks_time_t life, ks_bool_t mine); +static struct ks_dht_store_entry_s *ks_dht_store_fetch(struct ks_dht_store_s *store, char *key); +static int ks_dht_store_insert(struct ks_dht_store_s *store, struct ks_dht_store_entry_s *entry, ks_time_t now); +static int ks_dht_store_replace(struct ks_dht_store_s *store, struct ks_dht_store_entry_s *entry); +static void ks_dht_store_prune(struct ks_dht_store_s *store, ks_time_t now); +static int ks_dht_store_create(ks_pool_t *pool, struct ks_dht_store_s **new_store); +static void ks_dht_store_destroy(struct ks_dht_store_s **old_store); + + +KS_DECLARE(void) ks_dht_store_entry_json_cb_set(struct dht_handle_s *h, ks_dht_store_entry_json_cb *store_json_cb, void *arg) +{ + h->store_json_cb = store_json_cb; + h->store_json_cb_arg = arg; +} + +static unsigned char *debug_printable(const unsigned char *buf, unsigned char *out, int buflen) +{ + int i; + for (i = 0; i < buflen; i++) { + out[i] = (buf[i] >= 32 && buf[i] <= 126) ? buf[i] : '.'; + } + return out; +} + +static void print_hex(FILE *f, const unsigned char *buf, int buflen) +{ + int i; + for (i = 0; i < buflen; i++) { + fprintf(f, "%02x", buf[i]); + } +} + +static int is_martian(const ks_sockaddr_t *sa) +{ + switch(sa->family) { + case AF_INET: { + return (sa->port == 0); + } + case AF_INET6: { + return (sa->port == 0); + } + + default: + return 1; + } +} + +/* Forget about the ``XOR-metric''. An id is just a path from the + root of the tree, so bits are numbered from the start. */ + +static int id_cmp(const unsigned char *restrict id1, const unsigned char *restrict id2) +{ + /* Memcmp is guaranteed to perform an unsigned comparison. */ + return memcmp(id1, id2, 20); +} + +/* Find the lowest 1 bit in an id. 
*/ +static int lowbit(const unsigned char *id) +{ + int i, j; + + for (i = 19; i >= 0; i--) { + if (id[i] != 0) { + break; + } + } + + if (i < 0) return -1; + + for (j = 7; j >= 0; j--) { + if ((id[i] & (0x80 >> j)) != 0) { + break; + } + } + + return 8 * i + j; +} + +/* Find how many bits two ids have in common. */ +static int common_bits(const unsigned char *id1, const unsigned char *id2) +{ + int i, j; + unsigned char xor; + for (i = 0; i < 20; i++) { + if (id1[i] != id2[i]) { + break; + } + } + + if (i == 20) { + return 160; + } + + xor = id1[i] ^ id2[i]; + + j = 0; + while ((xor & 0x80) == 0) { + xor <<= 1; + j++; + } + + return 8 * i + j; +} + +/* Determine whether id1 or id2 is closer to ref */ +static int xorcmp(const unsigned char *id1, const unsigned char *id2, const unsigned char *ref) +{ + int i; + for (i = 0; i < 20; i++) { + unsigned char xor1, xor2; + if (id1[i] == id2[i]) { + continue; + } + xor1 = id1[i] ^ ref[i]; + xor2 = id2[i] ^ ref[i]; + if (xor1 < xor2) { + return -1; + } + return 1; + } + return 0; +} + +/* We keep buckets in a sorted linked list. A bucket b ranges from + b->first inclusive up to b->next->first exclusive. */ +static int in_bucket(const unsigned char *id, struct bucket *b) +{ + return id_cmp(b->first, id) <= 0 && (b->next == NULL || id_cmp(id, b->next->first) < 0); +} + +static struct bucket *find_bucket(dht_handle_t *h, unsigned const char *id, int af) +{ + struct bucket *b = af == AF_INET ? h->buckets : h->buckets6; + + if (b == NULL) { + return NULL; + } + + while (1) { + if (b->next == NULL) { + return b; + } + + if (id_cmp(id, b->next->first) < 0) { + return b; + } + + b = b->next; + } +} + +static struct bucket *previous_bucket(dht_handle_t *h, struct bucket *b) +{ + struct bucket *p = b->af == AF_INET ? h->buckets : h->buckets6; + + if (b == p) { + return NULL; + } + + while (1) { + if (p->next == NULL) { + return NULL; + } + + if (p->next == b) { + return p; + } + + p = p->next; + } +} + +/* Every bucket contains an unordered list of nodes. */ +static struct node *find_node(dht_handle_t *h, const unsigned char *id, int af) +{ + struct bucket *b = find_bucket(h, id, af); + struct node *n; + + if (b == NULL) + return NULL; + + n = b->nodes; + while (n) { + if (id_cmp(n->id, id) == 0) { + return n; + } + n = n->next; + } + return NULL; +} + +/* Return a random node in a bucket. */ +static struct node *random_node(struct bucket *b) +{ + struct node *n; + int nn; + + if (b->count == 0) { + return NULL; + } + + nn = random() % b->count; + n = b->nodes; + while (nn > 0 && n) { + n = n->next; + nn--; + } + return n; +} + +/* Return the middle id of a bucket. */ +static int bucket_middle(struct bucket *b, unsigned char *id_return) +{ + int bit1 = lowbit(b->first); + int bit2 = b->next ? lowbit(b->next->first) : -1; + int bit = MAX(bit1, bit2) + 1; + + if (bit >= 160) { + return -1; + } + + memcpy(id_return, b->first, 20); + id_return[bit / 8] |= (0x80 >> (bit % 8)); + return 1; +} + +/* Return a random id within a bucket. */ +static int bucket_random(struct bucket *b, unsigned char *id_return) +{ + int bit1 = lowbit(b->first); + int bit2 = b->next ? 
lowbit(b->next->first) : -1; + int bit = MAX(bit1, bit2) + 1; + int i; + + if (bit >= 160) { + memcpy(id_return, b->first, 20); + return 1; + } + + memcpy(id_return, b->first, bit / 8); + id_return[bit / 8] = b->first[bit / 8] & (0xFF00 >> (bit % 8)); + id_return[bit / 8] |= random() & 0xFF >> (bit % 8); + for (i = bit / 8 + 1; i < 20; i++) { + id_return[i] = random() & 0xFF; + } + return 1; +} + +/* Insert a new node into a bucket. */ +static struct node *insert_node(dht_handle_t *h, struct node *node) +{ + struct bucket *b = find_bucket(h, node->id, node->ss.family); + + if (b == NULL) { + return NULL; + } + + node->next = b->nodes; + b->nodes = node; + b->count++; + return node; +} + +/* This is our definition of a known-good node. */ +static int node_good(dht_handle_t *h, struct node *node) +{ + return node->pinged <= 2 && node->reply_time >= h->now - 7200 && node->time >= h->now - 900; +} + +/* Our transaction-ids are 4-bytes long, with the first two bytes identi- + fying the kind of request, and the remaining two a sequence number in + host order. */ + +static void make_tid(unsigned char *tid_return, const char *prefix, unsigned short seqno) +{ + tid_return[0] = prefix[0] & 0xFF; + tid_return[1] = prefix[1] & 0xFF; + memcpy(tid_return + 2, &seqno, 2); +} + +static int tid_match(const unsigned char *tid, const char *prefix, unsigned short *seqno_return) +{ + if (tid[0] == (prefix[0] & 0xFF) && tid[1] == (prefix[1] & 0xFF)) { + if (seqno_return) { + memcpy(seqno_return, tid + 2, 2); + } + return 1; + } + + return 0; +} + +/* Every bucket caches the address of a likely node. Ping it. */ +static int send_cached_ping(dht_handle_t *h, struct bucket *b) +{ + unsigned char tid[4]; + int rc; + /* We set family to 0 when there's no cached node. */ + if (b->cached.family == 0) { + return 0; + } + + ks_log(KS_LOG_DEBUG, "Sending ping to cached node.\n"); + make_tid(tid, "pn", 0); + rc = send_ping(h, &b->cached, tid, 4); + b->cached.family = 0; + return rc; +} + +/* Called whenever we send a request to a node, increases the ping count + and, if that reaches 3, sends a ping to a new candidate. */ +static void pinged(dht_handle_t *h, struct node *n, struct bucket *b) +{ + n->pinged++; + n->pinged_time = h->now; + if (n->pinged >= 3) { + send_cached_ping(h, b ? b : find_bucket(h, n->id, n->ss.family)); + } +} + +/* The internal blacklist is an LRU cache of nodes that have sent + incorrect messages. */ +static void blacklist_node(dht_handle_t *h, const unsigned char *id, const ks_sockaddr_t *sa) +{ + int i; + + ks_log(KS_LOG_DEBUG, "Blacklisting broken node.\n"); + + if (id) { + struct node *n; + struct search *sr; + /* Make the node easy to discard. */ + n = find_node(h, id, sa->family); + if (n) { + n->pinged = 3; + pinged(h, n, NULL); + } + /* Discard it from any searches in progress. */ + sr = h->searches; + while (sr) { + for (i = 0; i < sr->numnodes; i++) { + if (id_cmp(sr->nodes[i].id, id) == 0) { + flush_search_node(&sr->nodes[i], sr); + } + } + sr = sr->next; + } + } + /* And make sure we don't hear from it again. */ + ks_addr_copy(&h->blacklist[h->next_blacklisted], sa); + h->next_blacklisted = (h->next_blacklisted + 1) % DHT_MAX_BLACKLISTED; +} + +static int node_blacklisted(dht_handle_t *h, const ks_sockaddr_t *sa) +{ + int i; + + if (dht_blacklisted(sa)) { + return 1; + } + + for(i = 0; i < DHT_MAX_BLACKLISTED; i++) { + if (ks_addr_cmp(&h->blacklist[i], sa)) { + return 1; + } + } + + return 0; +} + +/* Split a bucket into two equal parts. 
*/ +static struct bucket *split_bucket(dht_handle_t *h, struct bucket *b) +{ + struct bucket *new; + struct node *nodes; + int rc; + unsigned char new_id[20]; + + if ((rc = bucket_middle(b, new_id)) < 0) { + return NULL; + } + + new = ks_pool_alloc(h->pool, sizeof(struct bucket)); + + new->af = b->af; + + send_cached_ping(h, b); + + memcpy(new->first, new_id, 20); + new->time = b->time; + + nodes = b->nodes; + b->nodes = NULL; + b->count = 0; + new->next = b->next; + b->next = new; + while (nodes) { + struct node *n; + n = nodes; + nodes = nodes->next; + insert_node(h, n); + } + return b; +} + +/* We just learnt about a node, not necessarily a new one. Confirm is 1 if + the node sent a message, 2 if it sent us a reply. */ +static struct node *new_node(dht_handle_t *h, const unsigned char *id, const ks_sockaddr_t *sa, int confirm) +{ + struct bucket *b = find_bucket(h, id, sa->family); + struct node *n; + int mybucket, split; + + if (b == NULL) { + return NULL; + } + + if (id_cmp(id, h->myid) == 0) { + return NULL; + } + + if (is_martian(sa) || node_blacklisted(h, sa)) { + return NULL; + } + + mybucket = in_bucket(h->myid, b); + + if (confirm == 2) { + b->time = h->now; + } + + n = b->nodes; + while (n) { + if (id_cmp(n->id, id) == 0) { + if (confirm || n->time < h->now - 15 * 60) { + /* Known node. Update stuff. */ + ks_addr_copy(&n->ss, sa); + + if (confirm) { + n->time = h->now; + } + if (confirm >= 2) { + n->reply_time = h->now; + n->pinged = 0; + n->pinged_time = 0; + } + } + return n; + } + n = n->next; + } + + /* New node. */ + + if (mybucket) { + if (sa->family == AF_INET) { + h->mybucket_grow_time = h->now; + } else { + h->mybucket6_grow_time = h->now; + } + } + + /* First, try to get rid of a known-bad node. */ + n = b->nodes; + while (n) { + if (n->pinged >= 3 && n->pinged_time < h->now - 15) { + memcpy(n->id, id, 20); + ks_addr_copy(&n->ss, sa); + n->time = confirm ? h->now : 0; + n->reply_time = confirm >= 2 ? h->now : 0; + n->pinged_time = 0; + n->pinged = 0; + return n; + } + n = n->next; + } + + if (b->count >= 8) { + /* Bucket full. Ping a dubious node */ + int dubious = 0; + n = b->nodes; + while (n) { + /* Pick the first dubious node that we haven't pinged in the + last 15 seconds. This gives nodes the time to reply, but + tends to concentrate on the same nodes, so that we get rid + of bad nodes fast. */ + if (!node_good(h, n)) { + dubious = 1; + if (n->pinged_time < h->now - 15) { + unsigned char tid[4]; + ks_log(KS_LOG_DEBUG, "Sending ping to dubious node.\n"); + make_tid(tid, "pn", 0); + send_ping(h, &n->ss, tid, 4); + n->pinged++; + n->pinged_time = h->now; + break; + } + } + n = n->next; + } + + split = 0; + if (mybucket) { + if (!dubious) { + split = 1; + } + /* If there's only one bucket, split eagerly. This is + incorrect unless there's more than 8 nodes in the DHT. */ + else if (b->af == AF_INET && h->buckets->next == NULL) { + split = 1; + } else if (b->af == AF_INET6 && h->buckets6->next == NULL) { + split = 1; + } + } + + if (split) { + ks_log(KS_LOG_DEBUG, "Splitting.\n"); + b = split_bucket(h, b); + return new_node(h, id, sa, confirm); + } + + /* No space for this node. Cache it away for later. */ + if (confirm || b->cached.family == 0) { + ks_addr_copy(&b->cached, sa); + } + + return NULL; + } + + /* Create a new node. */ + n = ks_pool_alloc(h->pool, sizeof(struct node)); + + memcpy(n->id, id, 20); + ks_addr_copy(&n->ss, sa); + n->time = confirm ? h->now : 0; + n->reply_time = confirm >= 2 ? 
h->now : 0; + n->next = b->nodes; + b->nodes = n; + b->count++; + return n; +} + +/* Called periodically to purge known-bad nodes. Note that we're very + conservative here: broken nodes in the table don't do much harm, we'll + recover as soon as we find better ones. */ +static int expire_buckets(dht_handle_t *h, struct bucket *b) +{ + while (b) { + struct node *n, *p; + int changed = 0; + + while (b->nodes && b->nodes->pinged >= 4) { + n = b->nodes; + b->nodes = n->next; + b->count--; + changed = 1; + ks_pool_free(h->pool, n); + } + + p = b->nodes; + while (p) { + while (p->next && p->next->pinged >= 4) { + n = p->next; + p->next = n->next; + b->count--; + changed = 1; + ks_pool_free(h->pool, n); + } + p = p->next; + } + + if (changed) { + send_cached_ping(h, b); + } + + b = b->next; + } + h->expire_stuff_time = h->now + 120 + random() % 240; + return 1; +} + +/* While a search is in progress, we don't necessarily keep the nodes being + walked in the main bucket table. A search in progress is identified by + a unique transaction id, a short (and hence small enough to fit in the + transaction id of the protocol packets). */ + +static struct search *find_search(dht_handle_t *h, unsigned short tid, int af) +{ + struct search *sr = h->searches; + while (sr) { + if (sr->tid == tid && sr->af == af) { + return sr; + } + sr = sr->next; + } + return NULL; +} + +/* A search contains a list of nodes, sorted by decreasing distance to the + target. We just got a new candidate, insert it at the right spot or + discard it. */ + +static int insert_search_node(dht_handle_t *h, unsigned char *id, + const ks_sockaddr_t *sa, + struct search *sr, int replied, + unsigned char *token, int token_len) +{ + struct search_node *n; + int i, j; + + if (sa->family != sr->af) { + ks_log(KS_LOG_DEBUG, "Attempted to insert node in the wrong family.\n"); + return 0; + } + + for(i = 0; i < sr->numnodes; i++) { + if (id_cmp(id, sr->nodes[i].id) == 0) { + n = &sr->nodes[i]; + goto found; + } + if (xorcmp(id, sr->nodes[i].id, sr->id) < 0) { + break; + } + } + + if (i == SEARCH_NODES) { + return 0; + } + + if (sr->numnodes < SEARCH_NODES) { + sr->numnodes++; + } + + for (j = sr->numnodes - 1; j > i; j--) { + sr->nodes[j] = sr->nodes[j - 1]; + } + + n = &sr->nodes[i]; + + memset(n, 0, sizeof(struct search_node)); + memcpy(n->id, id, 20); + +found: + + ks_addr_copy(&n->ss, sa); + + if (replied) { + n->replied = 1; + n->reply_time = h->now; + n->request_time = 0; + n->pinged = 0; + } + if (token) { + if (token_len >= 40) { + ks_log(KS_LOG_DEBUG, "Eek! Overlong token.\n"); + assert(0); + } else { + memcpy(n->token, token, token_len); + n->token_len = token_len; + } + } + + return 1; +} + +static void flush_search_node(struct search_node *n, struct search *sr) +{ + int i = n - sr->nodes, j; + for (j = i; j < sr->numnodes - 1; j++) { + sr->nodes[j] = sr->nodes[j + 1]; + } + sr->numnodes--; +} + +static void expire_searches(dht_handle_t *h) +{ + struct search *sr = h->searches, *previous = NULL; + + while (sr) { + struct search *next = sr->next; + if (sr->step_time < h->now - DHT_SEARCH_EXPIRE_TIME) { + if (previous) { + previous->next = next; + } else { + h->searches = next; + } + ks_pool_free(h->pool, sr); + h->numsearches--; + } else { + previous = sr; + } + sr = next; + } +} + +/* This must always return 0 or 1, never -1, not even on failure (see below). 
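search_step adds up these return values so that it sends at most three new get_peers requests per step.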
*/ +static int search_send_get_peers(dht_handle_t *h, struct search *sr, struct search_node *n) +{ + struct node *node; + unsigned char tid[4]; + + if (n == NULL) { + int i; + for (i = 0; i < sr->numnodes; i++) { + if (sr->nodes[i].pinged < 3 && !sr->nodes[i].replied && sr->nodes[i].request_time < h->now - 15) { + n = &sr->nodes[i]; + } + } + } + + if (!n || n->pinged >= 3 || n->replied || n->request_time >= h->now - 15) { + return 0; + } + + ks_log(KS_LOG_DEBUG, "Sending get_peers.\n"); + make_tid(tid, "gp", sr->tid); + send_get_peers(h, &n->ss, tid, 4, sr->id, -1, n->reply_time >= h->now - 15); + n->pinged++; + n->request_time = h->now; + /* If the node happens to be in our main routing table, mark it as pinged. */ + if ((node = find_node(h, n->id, n->ss.family))) { + pinged(h, node, NULL); + } + return 1; +} + +/* When a search is in progress, we periodically call search_step to send + further requests. */ +static void search_step(dht_handle_t *h, struct search *sr) +{ + int i, j; + int all_done = 1; + + /* Check if the first 8 live nodes have replied. */ + j = 0; + for (i = 0; i < sr->numnodes && j < 8; i++) { + struct search_node *n = &sr->nodes[i]; + if (n->pinged >= 3) { + continue; + } + if (!n->replied) { + all_done = 0; + break; + } + j++; + } + + if (all_done) { + int all_acked = 1; + if (sr->port == 0) { + goto done; + } + + j = 0; + + for (i = 0; i < sr->numnodes && j < 8; i++) { + struct search_node *n = &sr->nodes[i]; + struct node *node; + unsigned char tid[4]; + if (n->pinged >= 3) { + continue; + } + /* A proposed extension to the protocol consists in omitting the token when storage tables are full. While + I don't think this makes a lot of sense -- just sending a positive reply is just as good --, let's deal with it. */ + if (n->token_len == 0) { + n->acked = 1; + } + if (!n->acked) { + all_acked = 0; + ks_log(KS_LOG_DEBUG, "Sending announce_peer.\n"); + make_tid(tid, "ap", sr->tid); + send_announce_peer(h, &n->ss, + tid, 4, sr->id, sr->port, + n->token, n->token_len, + n->reply_time < h->now - 15); + n->pinged++; + n->request_time = h->now; + node = find_node(h, n->id, n->ss.family); + if (node) pinged(h, node, NULL); + } + j++; + } + if (all_acked) { + goto done; + } + + sr->step_time = h->now; + return; + } + + if (sr->step_time + 15 >= h->now) { + return; + } + + j = 0; + for (i = 0; i < sr->numnodes; i++) { + j += search_send_get_peers(h, sr, &sr->nodes[i]); + if (j >= 3) { + break; + } + } + sr->step_time = h->now; + return; + + done: + sr->done = 1; + if (h->callback) { + h->callback(h->closure, sr->af == AF_INET ? KS_DHT_EVENT_SEARCH_DONE : KS_DHT_EVENT_SEARCH_DONE6, sr->id, NULL, 0); + } + sr->step_time = h->now; +} + +static struct search *new_search(dht_handle_t *h) +{ + struct search *sr, *oldest = NULL; + + /* Find the oldest done search */ + sr = h->searches; + while (sr) { + if (sr->done && (oldest == NULL || oldest->step_time > sr->step_time)) { + oldest = sr; + } + sr = sr->next; + } + + /* The oldest slot is expired. */ + if (oldest && oldest->step_time < h->now - DHT_SEARCH_EXPIRE_TIME) { + return oldest; + } + + /* Allocate a new slot. */ + if (h->numsearches < DHT_MAX_SEARCHES) { + sr = ks_pool_alloc(h->pool, sizeof(struct search)); + sr->next = h->searches; + h->searches = sr; + h->numsearches++; + return sr; + } + + /* Oh, well, never mind. Reuse the oldest slot. */ + return oldest; +} + +/* Insert the contents of a bucket into a search structure. 
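Each node is offered to insert_search_node, which keeps the list sorted by distance to the target, merges duplicates, and drops anything that would fall beyond SEARCH_NODES.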
*/ +static void insert_search_bucket(dht_handle_t *h, struct bucket *b, struct search *sr) +{ + struct node *n; + n = b->nodes; + while (n) { + insert_search_node(h, n->id, &n->ss, sr, 0, NULL, 0); + n = n->next; + } +} + +/* Start a search. If port is non-zero, perform an announce when the + search is complete. */ +KS_DECLARE(int) dht_search(dht_handle_t *h, const unsigned char *id, int port, int af, dht_callback_t callback, void *closure) +{ + struct search *sr; + struct storage *st; + struct bucket *b = find_bucket(h, id, af); + + if (b == NULL) { + errno = EAFNOSUPPORT; + return -1; + } + + if (!callback) callback = h->callback; + if (!closure) closure = h->closure; + + /* Try to answer this search locally. In a fully grown DHT this + is very unlikely, but people are running modified versions of + this code in private DHTs with very few nodes. What's wrong + with flooding? */ + if (callback) { + st = find_storage(h, id); + if (st) { + int i; + + ks_log(KS_LOG_DEBUG, "Found local data (%d peers).\n", st->numpeers); + + for (i = 0; i < st->numpeers; i++) { + (*callback)(closure, st->peers[i].addr.family == AF_INET ? KS_DHT_EVENT_VALUES : KS_DHT_EVENT_VALUES6, id, (void *)&st->peers[i].addr, sizeof(st->peers[i].addr)); + } + } + } + + sr = h->searches; + while (sr) { + if (sr->af == af && id_cmp(sr->id, id) == 0) { + break; + } + sr = sr->next; + } + + if (sr) { + /* We're reusing data from an old search. Reusing the same tid + means that we can merge replies for both searches. */ + int i; + sr->done = 0; + again: + for (i = 0; i < sr->numnodes; i++) { + struct search_node *n; + n = &sr->nodes[i]; + /* Discard any doubtful nodes. */ + if (n->pinged >= 3 || n->reply_time < h->now - 7200) { + flush_search_node(n, sr); + goto again; + } + n->pinged = 0; + n->token_len = 0; + n->replied = 0; + n->acked = 0; + } + } else { + sr = new_search(h); + if (sr == NULL) { + errno = ENOSPC; + return -1; + } + sr->af = af; + sr->tid = h->search_id++; + sr->step_time = 0; + memcpy(sr->id, id, 20); + sr->done = 0; + sr->numnodes = 0; + } + + sr->port = port; + + insert_search_bucket(h, b, sr); + + if (sr->numnodes < SEARCH_NODES) { + struct bucket *p = previous_bucket(h, b); + if (b->next) { + insert_search_bucket(h, b->next, sr); + } + if (p) { + insert_search_bucket(h, p, sr); + } + } + if (sr->numnodes < SEARCH_NODES) { + insert_search_bucket(h, find_bucket(h, h->myid, af), sr); + } + + search_step(h, sr); + h->search_time = h->now; + return 1; +} + +/* A struct storage stores all the stored peer addresses for a given info hash. */ +static struct storage *find_storage(dht_handle_t *h, const unsigned char *id) +{ + struct storage *st = h->storage; + + while(st) { + if (id_cmp(id, st->id) == 0) { + break; + } + st = st->next; + } + return st; +} + +static int storage_store(dht_handle_t *h, const unsigned char *id, const ks_sockaddr_t *sa, unsigned short port) +{ + int i; + struct storage *st; + + st = find_storage(h, id); + + if (st == NULL) { + if (h->numstorage >= DHT_MAX_HASHES) { + return -1; + } + + st = ks_pool_alloc(h->pool, sizeof(struct storage)); + memcpy(st->id, id, 20); + st->next = h->storage; + h->storage = st; + h->numstorage++; + } + + for(i = 0; i < st->numpeers; i++) { + if (ks_addr_cmp(&st->peers[i].addr, sa)) { + break; + } + } + + if (i < st->numpeers) { + /* Already there, only need to refresh */ + st->peers[i].time = h->now; + return 0; + } else { + struct peer *p; + if (i >= st->maxpeers) { + /* Need to expand the array. 
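Grow geometrically (start at 2 slots, then double, capped at DHT_MAX_PEERS) so we don't realloc for every new peer.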
*/ + struct peer *new_peers; + int n; + if (st->maxpeers >= DHT_MAX_PEERS) { + return 0; + } + n = st->maxpeers == 0 ? 2 : 2 * st->maxpeers; + n = MIN(n, DHT_MAX_PEERS); + + if (!(new_peers = realloc(st->peers, n * sizeof(struct peer)))) { + return -1; + } + st->peers = new_peers; + st->maxpeers = n; + } + p = &st->peers[st->numpeers++]; + p->time = h->now; + ks_addr_copy(&p->addr, sa); + return 1; + } +} + +static int expire_storage(dht_handle_t *h) +{ + struct storage *st = h->storage, *previous = NULL; + + while (st) { + int i = 0; + while (i < st->numpeers) { + if (st->peers[i].time < h->now - 32 * 60) { + if (i != st->numpeers - 1) + st->peers[i] = st->peers[st->numpeers - 1]; + st->numpeers--; + } else { + i++; + } + } + + if (st->numpeers == 0) { + free(st->peers); + if (previous) { + previous->next = st->next; + ks_pool_free(h->pool, st); + st = previous->next; + } else { + h->storage = st->next; + ks_pool_free(h->pool, st); + st = h->storage; + } + + h->numstorage--; + if (h->numstorage < 0) { + ks_log(KS_LOG_DEBUG, "Eek... numstorage became negative.\n"); + h->numstorage = 0; + } + } else { + previous = st; + st = st->next; + } + } + return 1; +} + +static int rotate_secrets(dht_handle_t *h) +{ + h->rotate_secrets_time = h->now + 900 + random() % 1800; + + memcpy(h->oldsecret, h->secret, sizeof(h->secret)); + randombytes_buf(h->secret, sizeof(h->secret)); + + return 1; +} + +#ifndef TOKEN_SIZE +#define TOKEN_SIZE 8 +#endif + +static void make_token(dht_handle_t *h, const ks_sockaddr_t *sa, int old, unsigned char *token_return) +{ + void *ip; + ks_size_t iplen; + unsigned short port; + + ks_addr_raw_data(sa, &ip, &iplen); + port = htons(sa->port); + + dht_hash(token_return, TOKEN_SIZE, old ? h->oldsecret : h->secret, sizeof(h->secret), ip, iplen, (unsigned char*)&port, 2); +} + +static int token_match(dht_handle_t *h, const unsigned char *token, int token_len, const ks_sockaddr_t *sa) +{ + unsigned char t[TOKEN_SIZE]; + + if (token_len != TOKEN_SIZE) { + return 0; + } + + make_token(h, sa, 0, t); + if (memcmp(t, token, TOKEN_SIZE) == 0) { + return 1; + } + + make_token(h, sa, 1, t); + if (memcmp(t, token, TOKEN_SIZE) == 0) { + return 1; + } + + return 0; +} + +KS_DECLARE(int) dht_nodes(dht_handle_t *h, int af, int *good_return, int *dubious_return, int *cached_return, int *incoming_return) +{ + int good = 0, dubious = 0, cached = 0, incoming = 0; + struct bucket *b = af == AF_INET ? h->buckets : h->buckets6; + + while (b) { + struct node *n = b->nodes; + while (n) { + if (node_good(h, n)) { + good++; + if (n->time > n->reply_time) { + incoming++; + } + } else { + dubious++; + } + n = n->next; + } + if (b->cached.family > 0) { + cached++; + } + b = b->next; + } + + if (good_return) { + *good_return = good; + } + + if (dubious_return) { + *dubious_return = dubious; + } + + if (cached_return) { + *cached_return = cached; + } + + if (incoming_return) { + *incoming_return = incoming; + } + + return good + dubious; +} + +static void dump_bucket(dht_handle_t *h, FILE *f, struct bucket *b) +{ + struct node *n = b->nodes; + int mine = in_bucket(h->myid, b); + int age = (int)(h->now - b->time); + int cached = b->cached.family; + fprintf(f, "Bucket "); + print_hex(f, b->first, 20); + fprintf(f, " count %d age %d%s%s:\n", b->count, age, mine ? " (mine)" : "", cached ? 
" (cached)" : ""); + + while (n) { + fprintf(f, " Node "); + print_hex(f, n->id, 20); + + if (n->ss.family == AF_INET6) { + fprintf(f, " [%s]:%d ", n->ss.host, n->ss.port); + } else { + fprintf(f, " %s:%d ", n->ss.host, n->ss.port); + } + + if (n->time != n->reply_time) { + fprintf(f, "age %ld, %ld", (long)(h->now - n->time), (long)(h->now - n->reply_time)); + } else { + fprintf(f, "age %ld", (long)(h->now - n->time)); + } + + if (n->pinged) { + fprintf(f, " (%d)", n->pinged); + } + + if (node_good(h, n)) { + fprintf(f, " (good)"); + } + fprintf(f, "\n"); + n = n->next; + } + +} + +KS_DECLARE(void) dht_dump_tables(dht_handle_t *h, FILE *f) +{ + int i; + struct bucket *b; + struct storage *st = h->storage; + struct search *sr = h->searches; + + fprintf(f, "My id "); + print_hex(f, h->myid, 20); + fprintf(f, "\n"); + + b = h->buckets; + while (b) { + dump_bucket(h, f, b); + b = b->next; + } + + fprintf(f, "\n"); + + b = h->buckets6; + while (b) { + dump_bucket(h, f, b); + b = b->next; + } + + while (sr) { + fprintf(f, "\nSearch%s id ", sr->af == AF_INET6 ? " (IPv6)" : ""); + print_hex(f, sr->id, 20); + fprintf(f, " age %d%s\n", (int)(h->now - sr->step_time), sr->done ? " (done)" : ""); + for (i = 0; i < sr->numnodes; i++) { + struct search_node *n = &sr->nodes[i]; + fprintf(f, "Node %d id ", i); + print_hex(f, n->id, 20); + fprintf(f, " bits %d age ", common_bits(sr->id, n->id)); + if (n->request_time) { + fprintf(f, "%d, ", (int)(h->now - n->request_time)); + } + fprintf(f, "%d", (int)(h->now - n->reply_time)); + if (n->pinged) { + fprintf(f, " (%d)", n->pinged); + } + fprintf(f, "%s%s.\n", find_node(h, n->id, AF_INET) ? " (known)" : "", n->replied ? " (replied)" : ""); + } + sr = sr->next; + } + + while (st) { + fprintf(f, "\nStorage "); + print_hex(f, st->id, 20); + fprintf(f, " %d/%d nodes:", st->numpeers, st->maxpeers); + for (i = 0; i < st->numpeers; i++) { + char buf[100]; + if (st->peers[i].addr.family == AF_INET) { + ks_snprintf(buf, sizeof(buf), "%s", st->peers[i].addr.host); + } else if (st->peers[i].addr.family == AF_INET6) { + ks_snprintf(buf, sizeof(buf), "[%s]", st->peers[i].addr.host); + } else { + strcpy(buf, "???"); + } + fprintf(f, " %s:%u (%ld)", buf, st->peers[i].addr.port, (long)(h->now - st->peers[i].time)); + } + st = st->next; + } + + fprintf(f, "\n\n"); + fflush(f); +} + +static void ks_dht_store_entry_destroy(struct ks_dht_store_entry_s **old_entry) +{ + struct ks_dht_store_entry_s *entry = *old_entry; + ks_pool_t *pool = entry->pool; + *old_entry = NULL; + + /* While setting these members to NULL is not required, defaulting to including them for easier debugging */ + entry->key = NULL; + entry->content_type = NULL; + entry->payload_raw = NULL; + entry->pool = NULL; + + if ( entry->bencode_message_raw ) { + ben_free(entry->bencode_message_raw); + entry->bencode_message_raw = NULL; + } + + if ( entry->payload_bencode ) { + ben_free(entry->payload_bencode); + entry->payload_bencode = NULL; + } + + if ( entry->body ) { + cJSON_Delete(entry->body); + entry->body = NULL; + } + + ks_pool_free(pool, entry); + return; +} + +/* Entries can be created by a remote system 'pushing' a message to us, or the local system creating and sending the message. 
*/ + +static int ks_dht_store_entry_create(struct dht_handle_s *h, struct bencode *msg, struct ks_dht_store_entry_s **new_entry, ks_time_t life, ks_bool_t mine) +{ + struct ks_dht_store_entry_s *entry = NULL; + ks_time_t now = ks_time_now_sec(); + + entry = ks_pool_alloc(h->pool, sizeof(struct ks_dht_store_entry_s)); + entry->pool = h->pool; + entry->received = now; + entry->expiration = now + life; + entry->last_announce = 0; /* TODO: Instead we should announce this one, and set to now */ + entry->serial = 1; + entry->mine = mine; + + entry->bencode_message_raw = msg; + entry->payload_raw = NULL; + + entry->content_type = NULL; + entry->payload_bencode = NULL; + entry->body = NULL; + + if ( msg ) { + struct bencode *key_args = ben_dict_get_by_str(msg, "a"); + struct bencode *key_token = NULL; + struct bencode *key_v = NULL; + struct bencode *key_ct = NULL; + struct bencode *tmp_v = NULL; + + if ( !key_args ) { + ks_log(KS_LOG_ERROR, "dht_store_entry requires an 'a' key in the message\n"); + goto err; + } + + key_token = ben_dict_get_by_str(key_args, "token"); + if ( !key_token ) { + ks_log(KS_LOG_ERROR, "dht_store_entry requires an 'token' key in the message\n"); + goto err; + } + entry->key = ben_str_val(key_token); + ks_log(KS_LOG_INFO, "dht_store_entry now with new key[%s]\n", entry->key); + + key_v = ben_dict_get_by_str(key_args, "v"); + if ( !key_v ) { + ks_log(KS_LOG_ERROR, "dht_store_entry requires an 'v' key in the message\n"); + goto err; + } + + tmp_v = ben_decode(ben_str_val(key_v), ben_str_len(key_v)); + + entry->payload_raw = ben_str_val(tmp_v); + entry->payload_bencode = ben_decode(entry->payload_raw, ben_str_len(tmp_v)); + + if ( !entry->payload_bencode ) { + ks_log(KS_LOG_WARNING, "dht_store_entry payload failed to parse as bencode object\n"); + goto err; + } + + ks_log(KS_LOG_DEBUG, "Payload: %s\n", ben_print(entry->payload_bencode)); + + if ( ! ben_is_dict( entry->payload_bencode ) ) { + ks_log(KS_LOG_DEBUG, "dht_store_entry is not a bencode dict. Legal, just not likely one of ours.\n"); + goto done; + } + + /* + This is a custom key that SWITCHBLADE is adding to give the protocol decoder a hint as to the payload type. + If this key is not set, then we need to assume that the payload is binary buffer of a known length, likely not from SWITCHBLADE. + */ + key_ct = ben_dict_get_by_str(entry->payload_bencode, "ct"); + if ( !key_ct ) { + ks_log(KS_LOG_DEBUG, "dht_store_entry without a 'ct' key to hint at payload content type. Legal, just not likely one of ours.\n"); + goto done; + } + + entry->content_type = ben_str_val(key_ct); + + if ( !ben_cmp_with_str(key_ct, "json") ) { + struct bencode *key_b = ben_dict_get_by_str(entry->payload_bencode, "b"); + int buf_len = ben_str_len(key_b); + char *buf = NULL; + + buf = calloc(1, buf_len); + memcpy(buf, ben_str_val(key_b), buf_len); + + entry->body = cJSON_Parse(buf); + free(buf); + buf = NULL; + + if ( !entry->body ) { + ks_log(KS_LOG_ERROR, "dht_store_entry with json payload failed to json parse. 
Someone sent and signed an invalid message.\n"); + goto err; + } + + if ( h->store_json_cb ) { + h->store_json_cb(h, entry->body, h->store_json_cb_arg); + } + } + } + + done: + *new_entry = entry; + return 0; + err: + ks_dht_store_entry_destroy(&entry); + return -1; +} + +static struct ks_dht_store_entry_s *ks_dht_store_fetch(struct ks_dht_store_s *store, char *key) +{ + assert(store != NULL); + + return ks_hash_search(store->hash, (void *)key, 0); +} + +static int ks_dht_store_insert(struct ks_dht_store_s *store, struct ks_dht_store_entry_s *entry, ks_time_t now) +{ + return ks_hash_insert(store->hash, (void *)entry->key, entry); +} + +static int ks_dht_store_replace(struct ks_dht_store_s *store, struct ks_dht_store_entry_s *entry) +{ + struct ks_dht_store_entry_s *val = ks_hash_remove(store->hash, (void *) entry->key); + + if ( val ) { + ks_dht_store_entry_destroy(&val); + } + + return ks_hash_insert(store->hash, (void *) entry->key, entry); +} + +static void ks_dht_store_prune(struct ks_dht_store_s *store, ks_time_t now) +{ + (void) store; + (void) now; + return; +} + +/* TODO: Look into using the ks_hash automatic destructor functionality. */ +static int ks_dht_store_create(ks_pool_t *pool, struct ks_dht_store_s **new_store) +{ + struct ks_dht_store_s *store = NULL; + + store = ks_pool_alloc(pool, sizeof(struct ks_dht_store_s)); + store->next_expiring = 0; + store->pool = pool; + + ks_hash_create(&store->hash, KS_HASH_MODE_DEFAULT, KS_HASH_FLAG_RWLOCK, pool); + + *new_store = store; + return 0; +} + +static void ks_dht_store_destroy(struct ks_dht_store_s **old_store) +{ + struct ks_dht_store_s *store = *old_store; + ks_hash_iterator_t *itt = NULL; + ks_pool_t *pool = store->pool; + *old_store = NULL; + + ks_hash_write_lock(store->hash); + for (itt = ks_hash_first(store->hash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) { + const void *key = NULL; + struct ks_dht_store_entry_s *val = NULL; + + ks_hash_this(itt, &key, NULL, (void **) &val); + ks_hash_remove(store->hash, (char *)key); + + ks_dht_store_entry_destroy(&val); + } + ks_hash_write_unlock(store->hash); + + ks_hash_destroy(&store->hash); + + ks_pool_free(pool, store); + + return; +} + +static void reset_poll(dht_handle_t *h) +{ + int i = 0, socks = h->ip4s + h->ip6s; + ks_hash_iterator_t *itt; + + if (!h->iphash) return; + + if (h->num_pollsocks < socks) { + h->num_pollsocks = socks; + h->pollsocks = (struct pollfd *)ks_pool_resize(h->pool, (void *)h->pollsocks, sizeof(struct pollfd) * h->num_pollsocks); + h->iptsocks = (ks_ip_t **) ks_pool_resize(h->pool, (void *)h->iptsocks, sizeof(ks_ip_t *) * h->num_pollsocks); + h->addrsocks = (ks_sockaddr_t **) ks_pool_resize(h->pool, (void *)h->addrsocks, sizeof(ks_sockaddr_t *) * h->num_pollsocks); + ks_log(KS_LOG_DEBUG, "Resize poll array to %d\n", h->num_pollsocks); + } + + for (itt = ks_hash_first(h->iphash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) { + const void *key; + void *val; + ks_ip_t *ipt; + + ks_hash_this(itt, &key, NULL, &val); + + ipt = (ks_ip_t *) val; + + h->pollsocks[i].fd = ipt->sock; + h->pollsocks[i].events = POLLIN | POLLERR; + h->iptsocks[i] = ipt; + h->addrsocks[i] = &ipt->addr; + i++; + } +} + +KS_DECLARE(ks_status_t) ks_dht_get_bind_addrs(dht_handle_t *h, const ks_sockaddr_t ***addrs, ks_size_t *addrlen) +{ + *addrs = (const ks_sockaddr_t **) h->addrsocks; + *addrlen = h->num_pollsocks; + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_dht_one_loop(dht_handle_t *h, int timeout) +{ + ks_status_t status; + int s, i; + unsigned char buf[65536] = 
{0}; + ks_size_t bytes = sizeof(buf); + + if (!h->started) { + return KS_STATUS_FAIL; + } + + + reset_poll(h); + + if (!timeout) timeout = h->tosleep * 1000; + + + s = ks_poll(h->pollsocks, h->num_pollsocks, timeout); + + if (s < 0) { + return KS_STATUS_FAIL; + } + + if (s == 0) { + dht_periodic(h, buf, 0, NULL); + return KS_STATUS_TIMEOUT; + } + + for (i = 0; i < h->num_pollsocks; i++) { + if ((h->pollsocks[i].revents & POLLIN)) { + ks_sockaddr_t remote_addr = KS_SA_INIT; + + remote_addr.family = h->iptsocks[i]->addr.family; + if ((status = ks_socket_recvfrom(h->pollsocks[i].fd, buf, &bytes, &remote_addr)) == KS_STATUS_SUCCESS) { + // git rid of tosleep and convert it to non-blocking counter so you can still call this in a loop and just return timeout till tosleep expired + // beginning of rabbit hole to change references to addrs to ks_addrs instead and stop passing sockaddr and len all over the place. + dht_periodic(h, buf, bytes, &remote_addr); + } + } + } + + return KS_STATUS_SUCCESS; +} + +static void clear_all_ip(dht_handle_t *h) +{ + ks_hash_iterator_t *itt; + + if (!h->iphash) return; + + ks_hash_write_lock(h->iphash); + for (itt = ks_hash_first(h->iphash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) { + const void *key; + void *val; + ks_ip_t *ipt; + + ks_hash_this(itt, &key, NULL, &val); + + ipt = (ks_ip_t *) val; + + ks_socket_close(&ipt->sock); + ks_pool_free(h->pool, ipt); + + if (ipt->addr.family == AF_INET) { + h->ip4s--; + } else { + h->ip6s--; + } + + ks_hash_remove(h->iphash, (char *)key); + } + ks_hash_write_unlock(h->iphash); + + ks_hash_destroy(&h->iphash); + +} + +static ks_ip_t *add_ip(dht_handle_t *h, const char *ip, int port, int family) +{ + ks_ip_t *ipt; + + ks_assert(h); + ks_assert(ip); + + if (!port) port = h->port; + + if (family == AF_INET) { + h->af_flags |= KS_DHT_AF_INET4; + + if (!h->buckets) { + h->buckets = ks_pool_alloc(h->pool, sizeof(*h->buckets)); + h->buckets->af = AF_INET; + } + + } else if (family == AF_INET6) { + h->af_flags |= KS_DHT_AF_INET6; + + if (!h->buckets6) { + h->buckets6 = ks_pool_alloc(h->pool, sizeof(*h->buckets6)); + h->buckets6->af = AF_INET6; + } + } + + + ks_log(KS_LOG_DEBUG, "Adding bind ip: %s port: %d family:%d\n", ip, port, family); + + ipt = ks_pool_alloc(h->pool, sizeof(*ipt)); + ipt->sock = KS_SOCK_INVALID; + + ks_set_string(ipt->ip, ip); + + ks_addr_set(&ipt->addr, ip, port, family); + + if ((ipt->sock = socket(family, SOCK_DGRAM, IPPROTO_UDP)) == KS_SOCK_INVALID) { + ks_log(KS_LOG_ERROR, "Socket Error\n"); + ks_pool_free(h->pool, ipt); + return NULL; + } + + if (ks_addr_bind(ipt->sock, &ipt->addr) != KS_STATUS_SUCCESS) { + ks_log(KS_LOG_ERROR, "Error Adding bind ip: %s port: %d sock: %d (%s)\n", ip, port, ipt->sock, strerror(errno)); + ks_socket_close(&ipt->sock); + ks_pool_free(h->pool, ipt); + return NULL; + } + + ks_socket_option(ipt->sock, SO_REUSEADDR, KS_TRUE); + ks_socket_option(ipt->sock, KS_SO_NONBLOCK, KS_TRUE); + + ks_hash_insert(h->iphash, (void *)ipt->ip, ipt); + + if (family == AF_INET) { + h->ip4s++; + } else { + h->ip6s++; + } + + reset_poll(h); + + return ipt; +} + +KS_DECLARE(void) ks_dht_set_port(dht_handle_t *h, unsigned int port) +{ + h->port = port; +} + +KS_DECLARE(void) ks_dht_set_v(dht_handle_t *h, const unsigned char *v) +{ + if (v) { + memcpy(h->my_v, "1:v4:", 5); + memcpy(h->my_v + 5, v, 4); + h->have_v = 1; + } else { + h->have_v = 0; + } +} + +KS_DECLARE(ks_status_t) ks_dht_add_ip(dht_handle_t *h, char *ip, int port) +{ + int family = AF_INET; + + if (strchr(ip, ':')) { + family = 
AF_INET6; + } + + return add_ip(h, ip, port, family) ? KS_STATUS_SUCCESS : KS_STATUS_FAIL; +} + + +KS_DECLARE(void) ks_dht_set_callback(dht_handle_t *h, dht_callback_t callback, void *closure) +{ + h->callback = callback; + h->closure = closure; +} + + +KS_DECLARE(void) ks_dht_set_param(dht_handle_t *h, ks_dht_param_t param, ks_bool_t val) +{ + switch(param) { + case DHT_PARAM_AUTOROUTE: + h->autoroute = val; + break; + } +} + +KS_DECLARE(void) ks_dht_start(dht_handle_t *h) +{ + char ip[48] = ""; + int mask = 0; + + if (h->started) return; + + if ((h->af_flags & KS_DHT_AF_INET4) && !h->ip4s) { + ks_find_local_ip(ip, sizeof(ip), &mask, AF_INET, NULL); + add_ip(h, ip, 0, AF_INET); + } + + if ((h->af_flags & KS_DHT_AF_INET6) && !h->ip6s) { + ks_find_local_ip(ip, sizeof(ip), &mask, AF_INET6, NULL); + add_ip(h, ip, 0, AF_INET6); + } + + h->started = 1; + +} + +//KS_DECLARE(int) dht_init(dht_handle_t **handle, int s, int s6, const unsigned char *id, const unsigned char *v, unsigned int port) +KS_DECLARE(ks_status_t) ks_dht_init(dht_handle_t **handle, ks_dht_af_flag_t af_flags, const unsigned char *id, unsigned int port) +{ + int rc; + dht_handle_t *h; + ks_pool_t *pool; + + ks_pool_open(&pool); + *handle = h = ks_pool_alloc(pool, sizeof(dht_handle_t)); + + h->pool = pool; + h->searches = NULL; + h->numsearches = 0; + + if (port) { + h->port = port; + } else { + h->port = 5309; + } + + h->af_flags = af_flags; + + + ks_hash_create(&h->iphash, KS_HASH_MODE_DEFAULT, KS_HASH_FLAG_RWLOCK, h->pool); + + h->store_json_cb = NULL; + h->store_json_cb_arg = NULL; + + h->storage = NULL; + h->numstorage = 0; + + if (!id) { + //ks_random_string((char *)h->myid, 20, NULL); + randombytes_buf(h->myid, 20); + } else { + memcpy(h->myid, id, 20); + } + + h->have_v = 0; + + h->now = ks_time_now_sec(); + + h->mybucket_grow_time = h->now; + h->mybucket6_grow_time = h->now; + h->confirm_nodes_time = h->now + random() % 3; + + h->search_id = random() & 0xFFFF; + h->search_time = 0; + + h->next_blacklisted = 0; + + h->token_bucket_time = h->now; + h->token_bucket_tokens = MAX_TOKEN_BUCKET_TOKENS; + + memset(h->secret, 0, sizeof(h->secret)); + rc = rotate_secrets(h); + if (rc < 0) + goto fail; + + + expire_buckets(h, h->buckets); + expire_buckets(h, h->buckets6); + + ks_dht_store_create(h->pool, &h->store); + + return KS_STATUS_SUCCESS; + + fail: + ks_pool_free(h->pool, h->buckets); + h->buckets = NULL; + ks_pool_free(h->pool, h->buckets6); + h->buckets6 = NULL; + return KS_STATUS_FAIL; +} + +KS_DECLARE(int) dht_uninit(dht_handle_t **handle) +{ + dht_handle_t *h; + ks_pool_t *pool; + + ks_assert(handle && *handle); + + h = *handle; + *handle = NULL; + + clear_all_ip(h); + + while (h->buckets) { + struct bucket *b = h->buckets; + h->buckets = b->next; + while (b->nodes) { + struct node *n = b->nodes; + b->nodes = n->next; + ks_pool_free(h->pool, n); + } + ks_pool_free(h->pool, b); + } + + while (h->buckets6) { + struct bucket *b = h->buckets6; + h->buckets6 = b->next; + while (b->nodes) { + struct node *n = b->nodes; + b->nodes = n->next; + ks_pool_free(h->pool, n); + } + ks_pool_free(h->pool, b); + } + + while (h->storage) { + struct storage *st = h->storage; + h->storage = h->storage->next; + ks_pool_free(h->pool, st->peers); + ks_pool_free(h->pool, st); + } + + while (h->searches) { + struct search *sr = h->searches; + h->searches = h->searches->next; + ks_pool_free(h->pool, sr); + } + + ks_dht_store_destroy(&h->store); + pool = h->pool; + h->pool = NULL; + ks_pool_free(pool, h); + ks_pool_close(&pool); + + return 1; +} 
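+
+/* Illustrative lifecycle sketch of the public API above (the address, port,
+   callback names and loop condition are placeholders, not part of the library):
+
+       dht_handle_t *h = NULL;
+       ks_dht_init(&h, KS_DHT_AF_INET4, NULL, 5309);
+       ks_dht_add_ip(h, "203.0.113.1", 0);   // optional; ks_dht_start() can pick a local address
+       ks_dht_set_callback(h, my_callback, my_closure);
+       ks_dht_start(h);
+       while (running) ks_dht_one_loop(h, 0);
+       dht_uninit(&h);
+*/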
+ +/* Rate control for requests we receive. */ + +static int token_bucket(dht_handle_t *h) +{ + if (h->token_bucket_tokens == 0) { + h->token_bucket_tokens = MIN(MAX_TOKEN_BUCKET_TOKENS, 100 * (h->now - h->token_bucket_time)); + h->token_bucket_time = h->now; + } + + if (h->token_bucket_tokens == 0) { + return 0; + } + + h->token_bucket_tokens--; + return 1; +} + +static int neighbourhood_maintenance(dht_handle_t *h, int af) +{ + unsigned char id[20]; + struct bucket *b = find_bucket(h, h->myid, af); + struct bucket *q; + struct node *n; + + if (b == NULL) { + return 0; + } + + memcpy(id, h->myid, 20); + id[19] = random() & 0xFF; + q = b; + + if (q->next && (q->count == 0 || (random() & 7) == 0)) { + q = b->next; + } + + if (q->count == 0 || (random() & 7) == 0) { + struct bucket *r; + r = previous_bucket(h, b); + if (r && r->count > 0) { + q = r; + } + } + + if (q) { + /* Since our node-id is the same in both DHTs, it's probably + profitable to query both families. */ + + n = random_node(q); + if (n) { + unsigned char tid[4]; + + const char *msg; + + + if ((h->af_flags & KS_DHT_AF_INET6) && (h->af_flags & KS_DHT_AF_INET4)) { + msg = "v4 and v6"; + } else if (h->af_flags & KS_DHT_AF_INET6) { + msg = "v6"; + } else { + msg = "v4"; + } + + + ks_log(KS_LOG_DEBUG, "Sending find_node for %s on %s neighborhood maintenance.\n", msg, af == AF_INET6 ? "IPv6" : "IPv4"); + make_tid(tid, "fn", 0); + send_find_node(h, &n->ss, tid, 4, id, sizeof(id), h->af_flags, n->reply_time >= h->now - 15); + pinged(h, n, q); + } + return 1; + } + return 0; +} + +static int bucket_maintenance(dht_handle_t *h, int af) +{ + struct bucket *b; + + b = af == AF_INET ? h->buckets : h->buckets6; + + while (b) { + struct bucket *q; + if (b->time < h->now - 600) { + /* This bucket hasn't seen any positive confirmation for a long + time. Pick a random id in this bucket's range, and send + a request to a random node. */ + unsigned char id[20]; + struct node *n; + int rc; + + rc = bucket_random(b, id); + if (rc < 0) { + memcpy(id, b->first, 20); + } + + q = b; + /* If the bucket is empty, we try to fill it from a neighbour. + We also sometimes do it gratuitiously to recover from + buckets full of broken nodes. */ + if (q->next && (q->count == 0 || (random() & 7) == 0)) { + q = b->next; + } + + if (q->count == 0 || (random() & 7) == 0) { + struct bucket *r; + r = previous_bucket(h, b); + if (r && r->count > 0) { + q = r; + } + } + + if (q) { + n = random_node(q); + if (n) { + unsigned char tid[4]; + int want = -1; + + if ((h->af_flags & KS_DHT_AF_INET4) && (h->af_flags & KS_DHT_AF_INET6)) { + struct bucket *otherbucket; + otherbucket = find_bucket(h, id, af == AF_INET ? AF_INET6 : AF_INET); + if (otherbucket && otherbucket->count < 8) { + /* The corresponding bucket in the other family is emptyish -- querying both is useful. */ + want = WANT4 | WANT6; + } else if (random() % 37 == 0) { + /* Most of the time, this just adds overhead. + However, it might help stitch back one of + the DHTs after a network collapse, so query + both, but only very occasionally. */ + want = WANT4 | WANT6; + } + } + + ks_log(KS_LOG_DEBUG, "Sending find_node for%s bucket maintenance.\n", af == AF_INET6 ? " IPv6" : ""); + make_tid(tid, "fn", 0); + send_find_node(h, &n->ss, tid, 4, id, sizeof(id), want, n->reply_time >= h->now - 15); + pinged(h, n, q); + /* In order to avoid sending queries back-to-back, give up for now and reschedule us soon. 
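Returning 1 makes dht_periodic schedule the next confirm_nodes pass within a few seconds instead of a minute or more.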
*/ + return 1; + } + } + } + b = b->next; + } + return 0; +} + + +KS_DECLARE(int) dht_periodic(dht_handle_t *h, const void *buf, size_t buflen, ks_sockaddr_t *from) +//KS_DECLARE(int) dht_periodic(dht_handle_t *h, const void *buf, size_t buflen, const struct sockaddr *from, int fromlen, +// time_t *tosleep, dht_callback *callback, void *closure) +{ + unsigned char *logmsg = NULL; + h->now = ks_time_now_sec(); + + if (buflen > 0) { + dht_msg_type_t message; + unsigned char tid[16], id[20], info_hash[20], target[20]; + unsigned char nodes[26*16], nodes6[38*16], token[128] = {0}; + int tid_len = 16, token_len = 0; + int nodes_len = 26*16, nodes6_len = 38*16; + unsigned short port = 0; + unsigned char values[2048], values6[2048]; + int values_len = 2048, values6_len = 2048; + int want = 0; + unsigned short ttid; + struct bencode *msg_ben = NULL; + struct bencode *key_args = NULL; /* Request args */ + struct bencode *key_info_hash = NULL; + struct bencode *key_want = NULL; + struct bencode *key_token = NULL; + struct bencode *key_port = NULL; + struct bencode *key_target = NULL; + + struct bencode *key_resp = NULL; /* Response values */ + struct bencode *key_values = NULL; + struct bencode *key_values6 = NULL; + struct bencode *key_nodes = NULL; + struct bencode *key_nodes6 = NULL; + + if (is_martian(from)) { + goto dontread; + } + + if (node_blacklisted(h, from)) { + ks_log(KS_LOG_DEBUG, "Received packet from blacklisted node.\n"); + goto dontread; + } + + if (((char*)buf)[buflen] != '\0') { + ks_log(KS_LOG_DEBUG, "Unterminated message.\n"); + errno = EINVAL; + return -1; + } + + msg_ben = ben_decode((const void *) buf, buflen); + if ( !msg_ben ) { + ks_log(KS_LOG_DEBUG, "Received invalid message. Unable to ben_decode it.\n"); + goto dontread; + } + + message = parse_message(msg_ben, tid, &tid_len, id); + ks_log(KS_LOG_DEBUG, "Received bencode message[%d] from [%s] port (%d): \n\n%s\n", message, from->host, from->port, ben_print(msg_ben)); + + if (id_cmp(id, zeroes) == 0) { + message = DHT_MSG_INVALID; + } else if (id_cmp(id, h->myid) == 0) { + ks_log(KS_LOG_DEBUG, "Received message from self.\n"); + goto dontread; + } + + if (message > DHT_MSG_REPLY) { + /* Rate limit requests. 
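token_bucket() allows a burst of up to MAX_TOKEN_BUCKET_TOKENS and refills at roughly 100 tokens per second; when it runs dry the request is logged and dropped.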
*/ + if (!token_bucket(h)) { + ks_log(KS_LOG_DEBUG, "Dropping request due to rate limiting.\n"); + goto dontread; + } + } + + key_args = ben_dict_get_by_str(msg_ben, "a"); + if ( key_args ) { + key_info_hash = ben_dict_get_by_str(key_args, "info_hash"); + + if ( key_info_hash ) { + memcpy(info_hash, ben_str_val(key_info_hash), ben_str_len(key_info_hash)); + } + + key_want = ben_dict_get_by_str(key_args, "want"); + + if ( key_want && ben_is_list(key_want)) { + int x = 0; + for( x = 0; x < ben_list_len(key_want); x++ ) { + struct bencode *key_tmp = ben_list_get(key_want, x); + if ( !ben_cmp_with_str(key_tmp, "n4") ) { + want |= WANT4; + } else if ( !ben_cmp_with_str(key_tmp, "n6") ) { + want |= WANT6; + } + } + } else { + want = WANT4; + } + + key_target = ben_dict_get_by_str(key_args, "target"); + + if ( key_target ) { + memcpy(target, ben_str_val(key_target), ben_str_len(key_target)); + } + + + key_token = ben_dict_get_by_str(key_args, "token"); + + if ( key_token ) { + token_len = ben_str_len(key_token); + memcpy(token, ben_str_val(key_token), token_len); + } + + key_port = ben_dict_get_by_str(key_args, "port"); + + if ( key_port ) { + port = ben_int_val(key_port); + } + } + + key_resp = ben_dict_get_by_str(msg_ben, "r"); + if ( key_resp ) { + key_values = ben_dict_get_by_str(key_resp, "values"); + + if ( key_values ) { + values_len = ben_str_len(key_values); + memcpy(values, ben_str_val(key_values), values_len); + } + + key_values6 = ben_dict_get_by_str(key_resp, "values6"); + + if ( key_values6 ) { + values6_len = ben_str_len(key_values6); + memcpy(values6, ben_str_val(key_values6), values6_len); + } + + key_nodes = ben_dict_get_by_str(key_resp, "nodes"); + + if ( key_nodes ) { + nodes_len = ben_str_len(key_nodes); + memcpy(nodes, ben_str_val(key_nodes), nodes_len); + ks_log(KS_LOG_DEBUG, "Parsed nodes from response with length %d\n", nodes_len); + } + + key_nodes6 = ben_dict_get_by_str(key_resp, "nodes6"); + + if ( key_nodes6 ) { + nodes6_len = ben_str_len(key_nodes6); + memcpy(nodes6, ben_str_val(key_nodes6), nodes6_len); + } + } + + logmsg = calloc(1, buflen); + ks_log(KS_LOG_DEBUG, "Message type %d\n", message); + switch(message) { + case DHT_MSG_STORE_PUT: + if ( buf ) { + struct ks_dht_store_entry_s *entry = NULL; + struct bencode *sig = NULL, *salt = NULL; + struct bencode *sig_ben = NULL, *pk_ben = NULL; + unsigned char *data_sig = NULL; + const char *sig_binary = NULL, *pk_binary = NULL; + size_t data_sig_len = 0; + + /* Handle checking callback handler, and response */ + if ( !key_args ) { + ks_log(KS_LOG_DEBUG, "Failed to locate 'a' field in message\n"); + goto dontread; + } else { + ks_log(KS_LOG_DEBUG, "Successfully located 'a' field in message\n"); + } + + ks_log(KS_LOG_DEBUG, "Received bencode store PUT: \n\n%s\n", ben_print(msg_ben)); + + sig_ben = ben_dict_get_by_str(key_args, "sig"); + sig_binary = ben_str_val(sig_ben); + + pk_ben = ben_dict_get_by_str(key_args, "k"); + pk_binary = ben_str_val(pk_ben); + + sig = ben_dict(); + + salt = ben_dict_get_by_str(key_args, "salt"); + if ( salt ) { + ben_dict_set(sig, ben_blob("salt", 4), ben_blob(ben_str_val(salt), ben_str_len(salt))); + } + + /* TODO: fix double reference here. 
Need to bencode duplicate these values, and then free sig when finished encoding it */ + ben_dict_set(sig, ben_blob("seq", 3), ben_dict_get_by_str(key_args, "seq")); + ben_dict_set(sig, ben_blob("v", 1), ben_dict_get_by_str(key_args, "v")); + + data_sig = (unsigned char *) ben_encode(&data_sig_len, sig); + + if ( !data_sig ) { + ks_log(KS_LOG_DEBUG, "Failed to encode message for signature validation\n"); + goto dontread; + } + + if (crypto_sign_verify_detached((unsigned char *)sig_binary, data_sig, data_sig_len, (unsigned char *) pk_binary) != 0) { + ks_log(KS_LOG_DEBUG, "Signature failed to verify. Corrupted or malicious data suspected!\n"); + goto dontread; + } else { + ks_log(KS_LOG_DEBUG, "Valid message store signature.\n"); + } + + ks_dht_store_entry_create(h, msg_ben, &entry, 600, 0); + ks_dht_store_insert(h->store, entry, h->now); + } + break; + case DHT_MSG_INVALID: + case DHT_MSG_ERROR: + ks_log(KS_LOG_DEBUG, "Unparseable message: %s\n", debug_printable(buf, logmsg, buflen)); + goto dontread; + case DHT_MSG_REPLY: + if (tid_len != 4) { + ks_log(KS_LOG_DEBUG, "Broken node truncates transaction ids: %s\n", debug_printable(buf, logmsg, buflen)); + /* This is really annoying, as it means that we will + time-out all our searches that go through this node. + Kill it. */ + blacklist_node(h, id, from); + goto dontread; + } + if (tid_match(tid, "pn", NULL)) { + ks_log(KS_LOG_DEBUG, "Pong!\n"); + new_node(h, id, from, 2); + } else if (tid_match(tid, "fn", NULL) || tid_match(tid, "gp", NULL)) { + int gp = 0; + struct search *sr = NULL; + if (tid_match(tid, "gp", &ttid)) { + gp = 1; + sr = find_search(h, ttid, from->family); + } + ks_log(KS_LOG_DEBUG, "Nodes found (%d+%d)%s!\n", nodes_len/26, nodes6_len/38, gp ? " for get_peers" : ""); + if (nodes_len % 26 != 0 || nodes6_len % 38 != 0) { + ks_log(KS_LOG_DEBUG, "Unexpected length for node info!\n"); + blacklist_node(h, id, from); + } else if (gp && sr == NULL) { + ks_log(KS_LOG_DEBUG, "Unknown search!\n"); + new_node(h, id, from, 1); + } else { + int i; + new_node(h, id, from, 2); + for (i = 0; i < nodes_len / 26; i++) { + unsigned char *ni = nodes + i * 26; + struct sockaddr_in sin; + ks_sockaddr_t addr = { 0 }; + + if (id_cmp(ni, h->myid) == 0) { + continue; + } + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + memcpy(&sin.sin_addr, ni + 20, 4); + memcpy(&sin.sin_port, ni + 24, 2); + + ks_addr_set_raw(&addr, &sin.sin_addr, sin.sin_port, AF_INET); + new_node(h, ni, &addr, 0); + if (sr && sr->af == AF_INET) { + insert_search_node(h, ni, &addr, sr, 0, NULL, 0); + } + } + for (i = 0; i < nodes6_len / 38; i++) { + unsigned char *ni = nodes6 + i * 38; + struct sockaddr_in6 sin6; + ks_sockaddr_t addr = { 0 }; + + if (id_cmp(ni, h->myid) == 0) { + continue; + } + memset(&sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + memcpy(&sin6.sin6_addr, ni + 20, 16); + memcpy(&sin6.sin6_port, ni + 36, 2); + + ks_addr_set_raw(&addr, &sin6.sin6_addr, sin6.sin6_port, AF_INET6); + + new_node(h, ni, &addr, 0); + if (sr && sr->af == AF_INET6) { + insert_search_node(h, ni, &addr, sr, 0, NULL, 0); + } + } + if (sr) { + /* Since we received a reply, the number of requests in flight has decreased. Let's push another request. 
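Passing NULL lets search_send_get_peers pick a suitable node itself: one that hasn't replied, hasn't been pinged three times, and hasn't been queried in the last 15 seconds.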
*/ + search_send_get_peers(h, sr, NULL); + } + } + if (sr) { + if ( token_len ) { + ks_log(KS_LOG_DEBUG, "token %d [%.*s]\n", token_len, token); + } + insert_search_node(h, id, from, sr, 1, token, token_len); + if (values_len > 0 || values6_len > 0) { + ks_log(KS_LOG_DEBUG, "Got values (%d+%d)!\n", values_len / 6, values6_len / 18); + if (h->callback) { + if (values_len > 0) { + h->callback(h->closure, KS_DHT_EVENT_VALUES, sr->id, (void*)values, values_len); + } + if (values6_len > 0) { + h->callback(h->closure, KS_DHT_EVENT_VALUES6, sr->id, (void*)values6, values6_len); + } + } + } + } + } else if (tid_match(tid, "ap", &ttid)) { + struct search *sr; + ks_log(KS_LOG_DEBUG, "Got reply to announce_peer.\n"); + sr = find_search(h, ttid, from->family); + if (!sr) { + ks_log(KS_LOG_DEBUG, "Unknown search!\n"); + new_node(h, id, from, 1); + } else { + int i; + new_node(h, id, from, 2); + for (i = 0; i < sr->numnodes; i++) { + if (id_cmp(sr->nodes[i].id, id) == 0) { + sr->nodes[i].request_time = 0; + sr->nodes[i].reply_time = h->now; + sr->nodes[i].acked = 1; + sr->nodes[i].pinged = 0; + break; + } + } + /* See comment for gp above. */ + search_send_get_peers(h, sr, NULL); + } + } else { + ks_log(KS_LOG_DEBUG, "Unexpected reply: %s\n", debug_printable(buf, logmsg, buflen)); + } + break; + case DHT_MSG_PING: + ks_log(KS_LOG_DEBUG, "Ping (%d)!\n", tid_len); + new_node(h, id, from, 1); + ks_log(KS_LOG_DEBUG, "Sending pong.\n"); + send_pong(h, from, tid, tid_len); + break; + case DHT_MSG_FIND_NODE: + if ( key_args ) { + /* + http://www.bittorrent.org/beps/bep_0005.html + http://www.bittorrent.org/beps/bep_0032.html + + find_node Query = {"t":"aa", "y":"q", "q":"find_node", "a": {"id":"abcdefghij0123456789", "target":"mnopqrstuvwxyz123456"}} + bencoded = d1:ad2:id20:abcdefghij01234567896:target20:mnopqrstuvwxyz123456e1:q9:find_node1:t2:aa1:y1:qe + */ + + ks_log(KS_LOG_DEBUG, "Find node!\n"); + /* Needs to fetch the from, and fromlen from the decoded message, as well as the target and want */ + new_node(h, id, from, 1); + ks_log(KS_LOG_DEBUG, "Sending closest nodes (%d).\n", want); + send_closest_nodes(h, from, tid, tid_len, target, want, 0, NULL, NULL, 0); + } else { + goto dontread; + } + break; + case DHT_MSG_GET_PEERS: + /* + http://www.bittorrent.org/beps/bep_0005.html + + get_peers Query = {"t":"aa", "y":"q", "q":"get_peers", "a": {"id":"abcdefghij0123456789", "info_hash":"mnopqrstuvwxyz123456"}} + bencoded = d1:ad2:id20:abcdefghij01234567899:info_hash20:mnopqrstuvwxyz123456e1:q9:get_peers1:t2:aa1:y1:qe + */ + + ks_log(KS_LOG_DEBUG, "Get_peers!\n"); + new_node(h, id, from, 1); + if (id_cmp(info_hash, zeroes) == 0) { + ks_log(KS_LOG_DEBUG, "Eek! Got get_peers with no info_hash.\n"); + send_error(h, from, tid, tid_len, 203, "Get_peers with no info_hash"); + break; + } else { + struct storage *st = find_storage(h, info_hash); + unsigned char token[TOKEN_SIZE]; + make_token(h, from, 0, token); + if (st && st->numpeers > 0) { + ks_log(KS_LOG_DEBUG, "Sending found%s peers.\n", from->family == AF_INET6 ? 
" IPv6" : ""); + send_closest_nodes(h, from, tid, tid_len, info_hash, want, from->family, st, token, TOKEN_SIZE); + } else { + ks_log(KS_LOG_DEBUG, "Sending nodes for get_peers.\n"); + send_closest_nodes(h, from, tid, tid_len, info_hash, want, 0, NULL, token, TOKEN_SIZE); + } + } + break; + case DHT_MSG_ANNOUNCE_PEER: + ks_log(KS_LOG_DEBUG, "Announce peer!\n"); + new_node(h, id, from, 1); + if (id_cmp(info_hash, zeroes) == 0) { + ks_log(KS_LOG_DEBUG, "Announce_peer with no info_hash.\n"); + send_error(h, from, tid, tid_len, 203, "Announce_peer with no info_hash"); + break; + } + if (!token_match(h, token, token_len, from)) { + ks_log(KS_LOG_DEBUG, "Incorrect token for announce_peer.\n"); + send_error(h, from, tid, tid_len, 203, "Announce_peer with wrong token"); + break; + } + if (port == 0) { + ks_log(KS_LOG_DEBUG, "Announce_peer with forbidden port %d.\n", port); + send_error(h, from, tid, tid_len, 203, "Announce_peer with forbidden port number"); + break; + } + storage_store(h, info_hash, from, port); + /* Note that if storage_store failed, we lie to the requestor. This is to prevent them from backtracking, and hence polluting the DHT. */ + ks_log(KS_LOG_DEBUG, "Sending peer announced.\n"); + send_peer_announced(h, from, tid, tid_len); + } + } + + dontread: + if (h->now >= h->rotate_secrets_time) { + rotate_secrets(h); + } + + if (h->now >= h->expire_stuff_time) { + expire_buckets(h, h->buckets); + expire_buckets(h, h->buckets6); + expire_storage(h); + expire_searches(h); + } + + if (h->search_time > 0 && h->now >= h->search_time) { + struct search *sr; + sr = h->searches; + while (sr) { + if (!sr->done && sr->step_time + 5 <= h->now) { + search_step(h, sr); + } + sr = sr->next; + } + + h->search_time = 0; + + sr = h->searches; + while (sr) { + if (!sr->done) { + time_t tm = sr->step_time + 15 + random() % 10; + if (h->search_time == 0 || h->search_time > tm) { + h->search_time = tm; + } + } + sr = sr->next; + } + } + + if (h->now >= h->confirm_nodes_time) { + int soon = 0; + + soon |= bucket_maintenance(h, AF_INET); + soon |= bucket_maintenance(h, AF_INET6); + + if (!soon) { + if (h->mybucket_grow_time >= h->now - 150) { + soon |= neighbourhood_maintenance(h, AF_INET); + } + if (h->mybucket6_grow_time >= h->now - 150) { + soon |= neighbourhood_maintenance(h, AF_INET6); + } + } + + /* In order to maintain all buckets' age within 600 seconds, worst case is roughly 27 seconds, assuming the table is 22 bits deep. + We want to keep a margin for neighborhood maintenance, so keep this within 25 seconds. */ + if (soon) { + h->confirm_nodes_time = h->now + 5 + random() % 20; + } else { + h->confirm_nodes_time = h->now + 60 + random() % 120; + } + } + + if (h->confirm_nodes_time > h->now) { + h->tosleep = h->confirm_nodes_time - h->now; + } else { + h->tosleep = 0; + } + + if (h->search_time > 0) { + if (h->search_time <= h->now) { + h->tosleep = 0; + } else if (h->tosleep > h->search_time - h->now) { + h->tosleep = h->search_time - h->now; + } + } + ks_safe_free(logmsg); + + ks_dht_store_prune(h->store, h->now); + + return 1; +} + +KS_DECLARE(int) dht_get_nodes(dht_handle_t *h, struct sockaddr_in *sin, int *num, + struct sockaddr_in6 *sin6, int *num6) +{ + int i, j; + struct bucket *b; + struct node *n; + + i = 0; + + /* For restoring to work without discarding too many nodes, the list + must start with the contents of our bucket. 
*/ + b = find_bucket(h, h->myid, AF_INET); + if (b == NULL) { + goto no_ipv4; + } + + n = b->nodes; + while (n && i < *num) { + if (node_good(h, n)) { + sin[i] = *(struct sockaddr_in*)&n->ss; + i++; + } + n = n->next; + } + + b = h->buckets; + while (b && i < *num) { + if (!in_bucket(h->myid, b)) { + n = b->nodes; + while (n && i < *num) { + if (node_good(h, n)) { + sin[i] = *(struct sockaddr_in*)&n->ss; + i++; + } + n = n->next; + } + } + b = b->next; + } + + no_ipv4: + + j = 0; + + b = find_bucket(h, h->myid, AF_INET6); + if (b == NULL) { + goto no_ipv6; + } + + n = b->nodes; + while (n && j < *num6) { + if (node_good(h, n)) { + sin6[j] = *(struct sockaddr_in6*)&n->ss; + j++; + } + n = n->next; + } + + b = h->buckets6; + while (b && j < *num6) { + if (!in_bucket(h->myid, b)) { + n = b->nodes; + while (n && j < *num6) { + if (node_good(h, n)) { + sin6[j] = *(struct sockaddr_in6*)&n->ss; + j++; + } + n = n->next; + } + } + b = b->next; + } + + no_ipv6: + + *num = i; + *num6 = j; + return i + j; +} + +KS_DECLARE(int) dht_insert_node(dht_handle_t *h, const unsigned char *id, ks_sockaddr_t *sa) +{ + struct node *n; + + if (sa->family != AF_INET) { + errno = EAFNOSUPPORT; + return -1; + } + + n = new_node(h, id, sa, 0); + return !!n; +} + +KS_DECLARE(int) dht_ping_node(dht_handle_t *h, ks_sockaddr_t *sa) +{ + unsigned char tid[4]; + + ks_log(KS_LOG_DEBUG, "Sending ping.\n"); + make_tid(tid, "pn", 0); + return send_ping(h, sa, tid, 4); +} + +/* We could use a proper bencoding printer and parser, but the format of + DHT messages is fairly stylised, so this seemed simpler. */ + +#define CHECK(offset, delta, size) \ + if (delta < 0 || offset + delta > size) goto fail + +#define INC(offset, delta, size) \ + CHECK(offset, delta, size); \ + offset += delta + +#define COPY(buf, offset, src, delta, size) \ + CHECK(offset, delta, size); \ + memcpy(buf + offset, src, delta); \ + offset += delta; + +#define ADD_V(buf, offset, size) \ + if (h->have_v) { \ + COPY(buf, offset, h->my_v, sizeof(h->my_v), size); \ + } + +static int dht_send(dht_handle_t *h, const void *buf, size_t len, int flags, const ks_sockaddr_t *sa) +{ + char ip[80] = ""; + ks_ip_t *ipt; + + if (node_blacklisted(h, sa)) { + ks_log(KS_LOG_DEBUG, "Attempting to send to blacklisted node.\n"); + errno = EPERM; + return -1; + } + + + ks_ip_route(ip, sizeof(ip), sa->host); + + if (!(ipt = ks_hash_search(h->iphash, ip, KS_UNLOCKED)) && h->autoroute) { + ipt = add_ip(h, ip, 0, sa->family); + } + + if (!ipt) { + ks_log(KS_LOG_ERROR, "No route to dest\n"); + errno = EINVAL; + return -1; + } + + ks_log(KS_LOG_INFO, "Sending message to [%s] port (%d)\n", sa->host, sa->port); + + if (ks_socket_sendto(ipt->sock, (void *)buf, &len, (ks_sockaddr_t *)sa) != KS_STATUS_SUCCESS) { + ks_log(KS_LOG_ERROR, "Socket Error (%s)\n", strerror(errno)); + return -1; + } + + return 0; +} + +/* Sample ping packet '{"t":"aa", "y":"q", "q":"ping", "a":{"id":"abcdefghij0123456789"}}' */ +/* http://www.bittorrent.org/beps/bep_0005.html */ +static int send_ping(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); + ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("ping", 4)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + ben_dict_set(bencode_p, ben_blob("a", 1), 
bencode_a_p); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + return dht_send(h, buf, i, 0, sa); +} + +/* Sample pong packet '{"t":"aa", "y":"r", "r": {"id":"mnopqrstuvwxyz123456"}}' */ +/* http://www.bittorrent.org/beps/bep_0005.html */ +static int send_pong(dht_handle_t *h, const ks_sockaddr_t *sa, const unsigned char *tid, int tid_len) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("r", 1)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + ben_dict_set(bencode_p, ben_blob("r", 1), bencode_a_p); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + ks_log(KS_LOG_DEBUG, "Encoded PONG\n"); + return dht_send(h, buf, i, 0, sa); +} + +/* Sample find_node packet '{"t":"aa", "y":"q", "q":"find_node", "a": {"id":"abcdefghij0123456789", "target":"mnopqrstuvwxyz123456"}}' */ +/* Sample find_node packet w/ want '{"t":"aa", "y":"q", "q":"find_node", "a": {"id":"abcdefghij0123456789", "target":"mnopqrstuvwxyz123456", "want":"n4"}}' */ +/* http://www.bittorrent.org/beps/bep_0005.html */ +/* http://www.bittorrent.org/beps/bep_0032.html for want parameter */ +static int send_find_node(dht_handle_t *h, const ks_sockaddr_t *sa, + const unsigned char *tid, int tid_len, + const unsigned char *target, int target_len, int want, int confirm) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); + ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("find_node", 9)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + + if (target) ben_dict_set(bencode_a_p, ben_blob("target", 6), ben_blob(target, target_len)); + + if (want > 0) { + struct bencode *bencode_w = ben_list(); + if (want & WANT4) { + ben_list_append(bencode_w, ben_blob("n4", 2)); + } + if (want & WANT6) { + ben_list_append(bencode_w, ben_blob("n6", 2)); + } + ben_dict_set(bencode_a_p, ben_blob("want", 4), bencode_w); + } + + ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + return dht_send(h, buf, i, confirm ? 
MSG_CONFIRM : 0, sa); +} + +/* sample find_node response '{"t":"aa", "y":"r", "r": {"id":"0123456789abcdefghij", "nodes": "def456..."}}'*/ +/* http://www.bittorrent.org/beps/bep_0005.html */ +static int send_nodes_peers(dht_handle_t *h, const ks_sockaddr_t *sa, + const unsigned char *tid, int tid_len, + const unsigned char *nodes, int nodes_len, + const unsigned char *nodes6, int nodes6_len, + int af, struct storage *st, + const unsigned char *token, int token_len) +{ + char buf[2048]; + int i = 0, j0, j, k; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + struct bencode *ben_array = ben_list(); + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("r", 1)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + if (token_len) ben_dict_set(bencode_a_p, ben_blob("token", 5), ben_blob(token, token_len)); + if (nodes_len) ben_dict_set(bencode_a_p, ben_blob("nodes", 5), ben_blob(nodes, nodes_len)); + if (nodes6_len) ben_dict_set(bencode_a_p, ben_blob("nodes6", 6), ben_blob(nodes6, nodes6_len)); + + /* + Response with peers = {"t":"aa", "y":"r", "r": {"id":"abcdefghij0123456789", "token":"aoeusnth", "values": ["axje.u", "idhtnm"]}} + */ + + if (st && st->numpeers > 0) { + // We treat the storage as a circular list, and serve a randomly + // chosen slice. In order to make sure we fit within 1024 octets, + // we limit ourselves to 50 peers. + + j0 = random() % st->numpeers; + j = j0; + k = 0; + + do { + if (st->peers[j].addr.family == af) { + char data[18]; + unsigned short swapped = htons(st->peers[j].addr.port); + void *ip = NULL; + ks_size_t iplen = 0; + + ks_addr_raw_data(&st->peers[j].addr, &ip, &iplen); + + memcpy(data, ip, iplen); + memcpy(data + iplen, &swapped, 2); + ben_list_append(ben_array, ben_blob(data, iplen + 2)); + k++; + } + j = (j + 1) % st->numpeers; + } while(j != j0 && k < 50); + ben_dict_set(bencode_a_p, ben_blob("values", 6), ben_array); + } + + ben_dict_set(bencode_p, ben_blob("r", 1), bencode_a_p); + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + return dht_send(h, buf, i, 0, sa); +} + +static int insert_closest_node(unsigned char *nodes, int numnodes, + const unsigned char *id, struct node *n) +{ + int i, size; + + if (n->ss.family == AF_INET) { + size = 26; + } else if (n->ss.family == AF_INET6) { + size = 38; + } else { + abort(); + } + + for (i = 0; i< numnodes; i++) { + if (id_cmp(n->id, nodes + size * i) == 0) { + return numnodes; + } + if (xorcmp(n->id, nodes + size * i, id) < 0) { + break; + } + } + + if (i == 8) { + return numnodes; + } + + if (numnodes < 8) { + numnodes++; + } + + if (i < numnodes - 1) { + memmove(nodes + size * (i + 1), nodes + size * i, size * (numnodes - i - 1)); + } + + if (n->ss.family == AF_INET) { + memcpy(nodes + size * i, n->id, 20); + memcpy(nodes + size * i + 20, &n->ss.v.v4.sin_addr, 4); + memcpy(nodes + size * i + 24, &n->ss.v.v4.sin_port, 2); + } else if (n->ss.family == AF_INET6) { + memcpy(nodes + size * i, n->id, 20); + memcpy(nodes + size * i + 20, &n->ss.v.v6.sin6_addr, 16); + memcpy(nodes + size * i + 36, &n->ss.v.v6.sin6_port, 2); + } else { + abort(); + } + + return numnodes; +} + +static int buffer_closest_nodes(dht_handle_t *h, unsigned char *nodes, int numnodes, const unsigned char *id, struct bucket *b) +{ + struct node *n = b->nodes; + while (n) { + if (node_good(h, n)) { + numnodes = insert_closest_node(nodes, numnodes, id, n); 
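				/* A sketch of the flat buffer layout insert_closest_node() maintains, as implied
				   by the memcpy offsets above; the decoding loop is illustrative only and the
				   variable names are hypothetical:

				       IPv4 entry, 26 bytes: [20-byte node id][4-byte in_addr][2-byte port, network order]
				       IPv6 entry, 38 bytes: [20-byte node id][16-byte in6_addr][2-byte port, network order]

				       for (off = 0; off < numnodes * 26; off += 26) {
				           memcpy(nid,   nodes + off,      20);
				           memcpy(&a4,   nodes + off + 20,  4);
				           memcpy(&port, nodes + off + 24,  2);
				       }
				*/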
+ } + n = n->next; + } + return numnodes; +} + +static int send_closest_nodes(dht_handle_t *h, const ks_sockaddr_t *sa, + const unsigned char *tid, int tid_len, + const unsigned char *id, int want, + int af, struct storage *st, + const unsigned char *token, int token_len) +{ + unsigned char nodes[8 * 26]; + unsigned char nodes6[8 * 38]; + int numnodes = 0, numnodes6 = 0; + struct bucket *b; + + if (want < 0) { + want = sa->family == AF_INET ? WANT4 : WANT6; + } + + if ((want & WANT4)) { + if ((b = find_bucket(h, id, AF_INET))) { + numnodes = buffer_closest_nodes(h, nodes, numnodes, id, b); + if (b->next) { + numnodes = buffer_closest_nodes(h, nodes, numnodes, id, b->next); + } + if ((b = previous_bucket(h, b))) { + numnodes = buffer_closest_nodes(h, nodes, numnodes, id, b); + } + } else { + ks_log(KS_LOG_DEBUG, "send_closest_nodes did not find a 'close' ipv4 bucket\n"); + } + } + + if ((want & WANT6)) { + if ((b = find_bucket(h, id, AF_INET6))) { + numnodes6 = buffer_closest_nodes(h, nodes6, numnodes6, id, b); + if (b->next) { + numnodes6 = buffer_closest_nodes(h, nodes6, numnodes6, id, b->next); + } + if ((b = previous_bucket(h, b))) { + numnodes6 = buffer_closest_nodes(h, nodes6, numnodes6, id, b); + } + } else { + ks_log(KS_LOG_DEBUG, "send_closest_nodes did not find a 'close' ipv6 bucket\n"); + } + } + ks_log(KS_LOG_DEBUG, "send_closest_nodes (%d+%d nodes.)\n", numnodes, numnodes6); + + return send_nodes_peers(h, sa, tid, tid_len, + nodes, numnodes * 26, + nodes6, numnodes6 * 38, + af, st, token, token_len); +} + +/* sample get_peers request '{"t":"aa", "y":"q", "q":"get_peers", "a": {"id":"abcdefghij0123456789", "info_hash":"mnopqrstuvwxyz123456"}}'*/ +/* sample get_peers w/ want '{"t":"aa", "y":"q", "q":"get_peers", "a": {"id":"abcdefghij0123456789", "info_hash":"mnopqrstuvwxyz123456": "want":"n4"}}'*/ +/* http://www.bittorrent.org/beps/bep_0005.html */ +/* http://www.bittorrent.org/beps/bep_0032.html for want parameter */ +static int send_get_peers(dht_handle_t *h, const ks_sockaddr_t *sa, + unsigned char *tid, int tid_len, unsigned char *infohash, + int want, int confirm) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + int infohash_len = infohash ? strlen((const char*)infohash) : 0; + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); + ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("get_peers", 9)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + if (want > 0) { + struct bencode *bencode_w_p = ben_list(); + if (want & WANT4) { + ben_list_append(bencode_w_p, ben_blob("n4", 2)); + } + if (want & WANT6) { + ben_list_append(bencode_w_p, ben_blob("n6", 2)); + } + ben_dict_set(bencode_a_p, ben_blob("want", 4), bencode_w_p); + } + ben_dict_set(bencode_a_p, ben_blob("info_hash", 9), ben_blob(infohash, infohash_len)); + ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + ks_log(KS_LOG_DEBUG, "Encoded GET_PEERS\n"); + + return dht_send(h, buf, i, confirm ? 
MSG_CONFIRM : 0, sa); +} +/* '{"t":"aa", "y":"q", "q":"announce_peer", "a": {"id":"abcdefghij0123456789", "implied_port": 1, "info_hash":"mnopqrstuvwxyz123456", "port": 6881, "token": "aoeusnth"}}'*/ +static int send_announce_peer(dht_handle_t *h, const ks_sockaddr_t *sa, + unsigned char *tid, int tid_len, + unsigned char *infohash, unsigned short port, + unsigned char *token, int token_len, int confirm) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + int infohash_len = infohash ? strlen((const char*)infohash) : 0; + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("q", 1)); + ben_dict_set(bencode_p, ben_blob("q", 1), ben_blob("announce_peer", 13)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + ben_dict_set(bencode_a_p, ben_blob("info_hash", 9), ben_blob(infohash, infohash_len)); + ben_dict_set(bencode_a_p, ben_blob("port", 5), ben_int(port)); + ben_dict_set(bencode_a_p, ben_blob("token", 5), ben_blob(token, token_len)); + ben_dict_set(bencode_p, ben_blob("a", 1), bencode_a_p); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + ks_log(KS_LOG_DEBUG, "Encoded ANNOUNCE_PEERS\n"); + return dht_send(h, buf, i, confirm ? MSG_CONFIRM : 0, sa); +} +/* '{"t":"aa", "y":"r", "r": {"id":"mnopqrstuvwxyz123456"}}'*/ +static int send_peer_announced(dht_handle_t *h, const ks_sockaddr_t *sa, unsigned char *tid, int tid_len) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *bencode_a_p = ben_dict(); + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("r", 1)); + ben_dict_set(bencode_a_p, ben_blob("id", 2), ben_blob(h->myid, 20)); + ben_dict_set(bencode_p, ben_blob("r", 1), bencode_a_p); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); /* This SHOULD free the bencode_a_p as well */ + + ks_log(KS_LOG_DEBUG, "Encoded peer_announced: %s\n\n", buf); + return dht_send(h, buf, i, 0, sa); +} + +/* '{"t":"aa", "y":"e", "e":[201, "A Generic Error Ocurred"]}'*/ +static int send_error(dht_handle_t *h, const ks_sockaddr_t *sa, + unsigned char *tid, int tid_len, + int code, const char *message) +{ + char buf[512]; + int i = 0; + struct bencode *bencode_p = ben_dict(); + struct bencode *ben_array = ben_list(); + + ben_dict_set(bencode_p, ben_blob("t", 1), ben_blob(tid, tid_len)); + ben_dict_set(bencode_p, ben_blob("y", 1), ben_blob("e", 1)); + ben_list_append(ben_array, ben_int(code)); + ben_list_append(ben_array, ben_blob(message, strlen(message))); + ben_dict_set(bencode_p, ben_blob("e", 1), ben_array); + + i = ben_encode2(buf, 512, bencode_p); + ben_free(bencode_p); + + ks_log(KS_LOG_DEBUG, "Encoded error: %s\n\n", buf); + return dht_send(h, buf, i, 0, sa); +} + +#undef CHECK +#undef INC +#undef COPY +#undef ADD_V + +/* + +#ifdef HAVE_MEMMEM + +static void *dht_memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen) +{ + return memmem(haystack, haystacklen, needle, needlelen); +} + +#else + +static void *dht_memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen) +{ + const char *h = haystack; + const char *n = needle; + size_t i; + + + if (needlelen > haystacklen) + return NULL; + + for(i = 0; i <= haystacklen - needlelen; i++) { + if (memcmp(h + i, n, needlelen) == 0) { + return (void*)(h + i); 
+ } + } + return NULL; +} + +#endif +*/ +static dht_msg_type_t parse_message(struct bencode *bencode_p, + unsigned char *tid_return, int *tid_len, + unsigned char *id_return) +{ + // const unsigned char *p; + dht_msg_type_t type = DHT_MSG_INVALID; + struct bencode *b_tmp = NULL; + struct bencode *key_t = ben_dict_get_by_str(bencode_p, "t"); + struct bencode *key_args = ben_dict_get_by_str(bencode_p, "a"); + struct bencode *key_resp = ben_dict_get_by_str(bencode_p, "r"); + + /* Need to set tid, tid_len, and id_return. Then return the message type or msg_error. */ + + if ( key_t ) { + const char *tran = ben_str_val(key_t); + int tran_len = ben_str_len(key_t); + + memcpy(tid_return, tran, (size_t) tran_len); + *tid_len = tran_len; + } + + if ( key_args ) { + struct bencode *b_id = ben_dict_get_by_str( key_args, "id"); + const char *id = b_id ? ben_str_val(b_id) : NULL; + int id_len = ben_str_len(b_id); + + if ( id ) { + memcpy(id_return, id, id_len); + } + } + + if ( key_resp ) { + struct bencode *b_id = ben_dict_get_by_str( key_resp, "id"); + const char *id = b_id ? ben_str_val(b_id) : NULL; + int id_len = ben_str_len(b_id); + + if ( id ) { + memcpy(id_return, id, id_len); + } + } + + + if ( ben_dict_get_by_str(bencode_p, "y") && key_t ){ + /* This message is a KRPC message(aka DHT message) */ + + if ( ( b_tmp = ben_dict_get_by_str(bencode_p, "y") ) ) { + if ( !ben_cmp_with_str(b_tmp, "q") ) { /* Inbound queries */ + struct bencode *b_query = NULL; + const char *val = ben_str_val(b_tmp); + ks_log(KS_LOG_DEBUG, "Message Query [%s]\n", val); + + if ( !( b_query = ben_dict_get_by_str(bencode_p, "q") ) ) { + ks_log(KS_LOG_DEBUG, "Unable to locate query type field\n"); + } else { /* Has a query type */ + const char *query_type = ben_str_val(b_query); + if (!ben_cmp_with_str(b_query, "get_peers")) { + /* + { + 'a': { + 'id': '~\x12*\xe6L3\xba\x83\xafT\xe3\x02\x93\x0e\xae\xbd\xf8\xe1\x98\x87', + 'info_hash': 'w"E\x85\xdd97\xd1\xfe\x13Q\xfa\xdae\x9d\x8f\x86\xddN9' + }, + 'q': 'get_peers', + 't': '?\xf1', + 'v': 'LT\x01\x00', + 'y': 'q' + } + */ + + ks_log(KS_LOG_DEBUG, "get_peers query recieved\n"); + type = DHT_MSG_GET_PEERS; + goto done; + } else if (!ben_cmp_with_str(b_query, "ping")) { + /* + {'a': { + 'id': 'T\x1cd2\xc1\x85\xf4>?\x84#\xa8)\xd0`\x19y\xcf;\xda' + }, + 'q': 'ping', + 't': 'pn\x00\x00', + 'v': 'JC\x00\x00', + 'y': 'q' + } + */ + ks_log(KS_LOG_DEBUG, "ping query recieved from client \n"); + type = DHT_MSG_PING; + goto done; + } else if (!ben_cmp_with_str(b_query, "find_node")) { + /* + {'a': { + 'id': 'T\x1cq\x7f\xa9^\xf2\x97S\xceE\xad\xc9S\x9b\xa1\x1cCX\x8d', + 'target': 'T\x1cq\x7f\xa9C{\x83\xf9\xf6i&\x8b\x87*\xa2\xad\xad\x1a\xdd' + }, + 'q': 'find_node', + 't': '\x915\xbe\xfb', + 'v': 'UTu\x13', + 'y': 'q' + } + */ + type = DHT_MSG_FIND_NODE; + goto done; + } else if (!ben_cmp_with_str(b_query, "put")) { + ks_log(KS_LOG_DEBUG, "Recieved a store put request\n"); + type = DHT_MSG_STORE_PUT; + goto done; + } else { + ks_log(KS_LOG_DEBUG, "Unknown query type field [%s]\n", query_type); + } + } + + } else if ( !ben_cmp_with_str(b_tmp, "r") ) { /* Responses */ + const char *val = ben_str_val(b_tmp); + ks_log(KS_LOG_DEBUG, "Message Response [%s]\n", val); + type = DHT_MSG_REPLY; + goto done; + } else if ( !ben_cmp_with_str(b_tmp, "e") ) { + const char *val = ben_str_val(b_tmp); + ks_log(KS_LOG_DEBUG, "Message Error [%s]\n", val); + } else { + ks_log(KS_LOG_DEBUG, "Message Type Unknown!!!\n"); + } + } else { + ks_log(KS_LOG_DEBUG, "Message Type Unknown, has no 'y' key!!!\n"); + } + + /* 
+ Decode the request or response + (b_tmp = ben_dict_get_by_str(bencode_p, "y"))) { + ks_log(KS_LOG_DEBUG, "query value: %s\n", ben_print(b_tmp)); + */ + } else { + ks_log(KS_LOG_DEBUG, "Message not a remote DHT request nor query\n"); + } + + /* Default to MSG ERROR */ + ks_log(KS_LOG_DEBUG, "Unknown or unsupported message type\n"); + return type; + + done: + return type; + + /* + if (dht_memmem(buf, buflen, "1:q4:ping", 9)) { + return DHT_MSG_PING; + } + + if (dht_memmem(buf, buflen, "1:q9:find_node", 14)) { + return DHT_MSG_FIND_NODE; + } + + if (dht_memmem(buf, buflen, "1:q9:get_peers", 14)) { + return DHT_MSG_GET_PEERS; + } + + if (dht_memmem(buf, buflen, "1:q13:announce_peer", 19)) { + return DHT_MSG_ANNOUNCE_PEER; + } + + char *val = ben_str_val(b_tmp); + + */ + + + /* + if (tid_return) { + p = dht_memmem(buf, buflen, "1:t", 3); + if (p) { + long l; + char *q; + l = strtol((char*)p + 3, &q, 10); + if (q && *q == ':' && l > 0 && l < *tid_len) { + CHECK(q + 1, l); + memcpy(tid_return, q + 1, l); + *tid_len = l; + } else + *tid_len = 0; + } + } + if (id_return) { + p = dht_memmem(buf, buflen, "2:id20:", 7); + if (p) { + CHECK(p + 7, 20); + memcpy(id_return, p + 7, 20); + } else { + memset(id_return, 0, 20); + } + } + if (info_hash_return) { + p = dht_memmem(buf, buflen, "9:info_hash20:", 14); + if (p) { + CHECK(p + 14, 20); + memcpy(info_hash_return, p + 14, 20); + } else { + memset(info_hash_return, 0, 20); + } + } + if (port_return) { + p = dht_memmem(buf, buflen, "porti", 5); + if (p) { + long l; + char *q; + l = strtol((char*)p + 5, &q, 10); + if (q && *q == 'e' && l > 0 && l < 0x10000) { + *port_return = l; + } else { + *port_return = 0; + } + } else { + *port_return = 0; + } + } + if (target_return) { + p = dht_memmem(buf, buflen, "6:target20:", 11); + if (p) { + CHECK(p + 11, 20); + memcpy(target_return, p + 11, 20); + } else { + memset(target_return, 0, 20); + } + } + if (token_return) { + p = dht_memmem(buf, buflen, "5:token", 7); + if (p) { + long l; + char *q; + l = strtol((char*)p + 7, &q, 10); + if (q && *q == ':' && l > 0 && l < *token_len) { + CHECK(q + 1, l); + memcpy(token_return, q + 1, l); + *token_len = l; + } else { + *token_len = 0; + } + } else { + *token_len = 0; + } + } + + if (nodes_len) { + p = dht_memmem(buf, buflen, "5:nodes", 7); + if (p) { + long l; + char *q; + l = strtol((char*)p + 7, &q, 10); + if (q && *q == ':' && l > 0 && l <= *nodes_len) { + CHECK(q + 1, l); + memcpy(nodes_return, q + 1, l); + *nodes_len = l; + } else { + *nodes_len = 0; + } + } else { + *nodes_len = 0; + } + } + + if (nodes6_len) { + p = dht_memmem(buf, buflen, "6:nodes6", 8); + if (p) { + long l; + char *q; + l = strtol((char*)p + 8, &q, 10); + if (q && *q == ':' && l > 0 && l <= *nodes6_len) { + CHECK(q + 1, l); + memcpy(nodes6_return, q + 1, l); + *nodes6_len = l; + } else { + *nodes6_len = 0; + } + } else { + *nodes6_len = 0; + } + } + + if (values_len || values6_len) { + p = dht_memmem(buf, buflen, "6:valuesl", 9); + if (p) { + int i = p - buf + 9; + int j = 0, j6 = 0; + while (1) { + long l; + char *q; + l = strtol((char*)buf + i, &q, 10); + if (q && *q == ':' && l > 0) { + CHECK(q + 1, l); + i = q + 1 + l - (char*)buf; + if (l == 6) { + if (j + l > *values_len) { + continue; + } + memcpy((char*)values_return + j, q + 1, l); + j += l; + } else if (l == 18) { + if (j6 + l > *values6_len) { + continue; + } + memcpy((char*)values6_return + j6, q + 1, l); + j6 += l; + } else { + ks_log(KS_LOG_DEBUG, "Received weird value -- %d bytes.\n", (int)l); + } + } else { + break; + } + } + 
if (i >= buflen || buf[i] != 'e') { + ks_log(KS_LOG_DEBUG, "eek... unexpected end for values.\n"); + } + if (values_len) { + *values_len = j; + } + if (values6_len) { + *values6_len = j6; + } + } else { + if (values_len) { + *values_len = 0; + } + if (values6_len) { + *values6_len = 0; + } + } + } + + if (want_return) { + p = dht_memmem(buf, buflen, "4:wantl", 7); + if (p) { + int i = p - buf + 7; + *want_return = 0; + while (buf[i] > '0' && buf[i] <= '9' && buf[i + 1] == ':' && i + 2 + buf[i] - '0' < buflen) { + CHECK(buf + i + 2, buf[i] - '0'); + if (buf[i] == '2' && memcmp(buf + i + 2, "n4", 2) == 0) { + *want_return |= WANT4; + } else if (buf[i] == '2' && memcmp(buf + i + 2, "n6", 2) == 0) { + *want_return |= WANT6; + } else { + ks_log(KS_LOG_DEBUG, "eek... unexpected want flag (%c)\n", buf[i]); + } + i += 2 + buf[i] - '0'; + } + if (i >= buflen || buf[i] != 'e') { + ks_log(KS_LOG_DEBUG, "eek... unexpected end for want.\n"); + } + } else { + *want_return = -1; + } + } + +#undef CHECK + + if (dht_memmem(buf, buflen, "1:y1:r", 6)) { + return DHT_MSG_REPLY; + } + + if (dht_memmem(buf, buflen, "1:y1:e", 6)) { + return DHT_MSG_ERROR; + } + + if (!dht_memmem(buf, buflen, "1:y1:q", 6)) { + return DHT_MSG_INVALID; + } + + if (dht_memmem(buf, buflen, "1:q4:ping", 9)) { + return DHT_MSG_PING; + } + + if (dht_memmem(buf, buflen, "1:q9:find_node", 14)) { + return DHT_MSG_FIND_NODE; + } + + if (dht_memmem(buf, buflen, "1:q9:get_peers", 14)) { + return DHT_MSG_GET_PEERS; + } + + if (dht_memmem(buf, buflen, "1:q13:announce_peer", 19)) { + return DHT_MSG_ANNOUNCE_PEER; + } + + return DHT_MSG_INVALID; + + overflow: + ks_log(KS_LOG_DEBUG, "Truncated message.\n"); + return DHT_MSG_INVALID; + */ + +} + +/* b64encode function taken from kws.c. Maybe worth exposing a function like this. */ +static const char c64[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +static int b64encode(unsigned char *in, ks_size_t ilen, unsigned char *out, ks_size_t olen) +{ + int y=0,bytes=0; + ks_size_t x=0; + unsigned int b=0,l=0; + + if(olen) { + } + + for(x=0;x= 6) { + out[bytes++] = c64[(b>>(l-=6))%64]; + if(++y!=72) { + continue; + } + //out[bytes++] = '\n'; + y=0; + } + } + + if (l > 0) { + out[bytes++] = c64[((b%16)<<(6-l))%64]; + } + if (l != 0) while (l < 6) { + out[bytes++] = '=', l += 2; + } + + return 0; +} + + +/* + This function should generate the fields needed for the mutable message. + + Save the sending for another api, and possibly a third to generate and send all in one. + NOTE: + 1. When sending a mutable message, CAS(compare and swap) values need to be validated. + 2. Mutable messages MUST have a new key pair generated for each different mutable message. + The announce key is generated as a hash from the public key. To use one key pair for multiple messages, + a salt MUST be used that is unique and constant per message. + 3. The target hash will be generated here, and will be the hash that must be used for announcing the message, and updating it. 
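	A rough usage sketch under the notes above (buffer sizes follow the callers below;
	"seq", "salt"/"salt_len" and "token"/"token_len" are placeholders -- the in-tree
	caller currently passes the computed target buffer as the token):

	    unsigned char target[40] = {0};
	    unsigned char sig[crypto_sign_BYTES];
	    unsigned long long sig_len = crypto_sign_BYTES;
	    struct bencode *args = NULL;
	    struct bencode *value = ben_blob("hello", 5);

	    ks_dht_calculate_mutable_storage_target(pk, salt, salt_len, target, sizeof(target));
	    ks_dht_generate_mutable_storage_args(value, seq, 0, h->myid, 20, sk, pk,
	                                         salt, salt_len, token, token_len,
	                                         sig, &sig_len, &args);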
+ +*/ +KS_DECLARE(int) ks_dht_generate_mutable_storage_args(struct bencode *data, int64_t sequence, int cas, + unsigned char *id, int id_len, /* querying nodes id */ + const unsigned char *sk, const unsigned char *pk, + unsigned char *salt, unsigned long long salt_length, + unsigned char *token, unsigned long long token_length, + unsigned char *signature, unsigned long long *signature_length, + struct bencode **arguments) +{ + struct bencode *arg = NULL, *sig = NULL; + unsigned char *encoded_message = NULL, *encoded_data = NULL; + size_t encoded_message_size = 0, encoded_data_size = 0; + int err = 0; + + if ( !data || !sequence || !id || !id_len || !sk || !pk || + !token || !token_length || !signature || !signature_length) { + ks_log(KS_LOG_ERROR, "Missing required input\n"); + return -1; + } + + if ( arguments && *arguments) { + ks_log(KS_LOG_ERROR, "Arguments already defined.\n"); + return -1; + } + + if ( salt && salt_length > 64 ) { + ks_log(KS_LOG_ERROR, "Salt is too long. Can not be longer than 64 bytes\n"); + return -1; + } + + if ( sequence && sequence < 0 ) { + ks_log(KS_LOG_ERROR, "Sequence out of acceptable range\n"); + return -1; + } + + encoded_data = (unsigned char *) ben_encode(&encoded_data_size, data); + + if ( encoded_data_size > 1000 ) { + ks_log(KS_LOG_ERROR, "Message is too long. Max is 1000 bytes\n"); + free(encoded_data); + return -1; + } + + /* Need to dynamically allocate a bencoded object for the signature. */ + sig = ben_dict(); + + if ( salt ) { + ben_dict_set(sig, ben_blob("salt", 4), ben_blob(salt, salt_length)); + } + + ben_dict_set(sig, ben_blob("seq", 3), ben_int(sequence)); + ben_dict_set(sig, ben_blob("v", 1), ben_blob(encoded_data, encoded_data_size)); + + encoded_message = ben_encode(&encoded_message_size, sig); + ks_log(KS_LOG_DEBUG, "Encoded data %d [%.*s]\n", encoded_message_size, encoded_message_size, encoded_message); + + err = crypto_sign_detached(signature, NULL, encoded_message, encoded_message_size, sk); + if ( err ) { + ks_log(KS_LOG_ERROR, "Failed to sign message with provided secret key\n"); + return 1; + } + + free(encoded_message); + ben_free(sig); + + arg = ben_dict(); + + if ( cas ) { + ben_dict_set(arg, ben_blob("cas", 3), ben_int(cas)); + } + + ben_dict_set(arg, ben_blob("id", 2), ben_blob(id, id_len)); + ben_dict_set(arg, ben_blob("k", 1), ben_blob(pk, 32)); /* All ed25519 public keys are 32 bytes */ + + if ( salt ) { + ben_dict_set(arg, ben_blob("salt", 4), ben_blob(salt, salt_length)); + } + + ben_dict_set(arg, ben_blob("seq", 3), ben_int(sequence)); + ben_dict_set(arg, ben_blob("sig", 3), ben_blob(signature, (size_t) *signature_length)); + ben_dict_set(arg, ben_blob("token", 5), ben_blob(token, token_length)); + ben_dict_set(arg, ben_blob("v", 1), ben_blob(encoded_data, encoded_data_size)); + + *arguments = arg; + + free(encoded_data); + + return 0; +} + +KS_DECLARE(int) ks_dht_calculate_mutable_storage_target(unsigned char *pk, unsigned char *salt, int salt_length, unsigned char *target, int target_length) +{ + SHA_CTX sha; + unsigned char sha1[20] = {0}; + + /* Generate target sha-1 hash */ + SHA1_Init(&sha); + SHA1_Update(&sha, pk, 32); + + if ( salt ) { + SHA1_Update(&sha, salt, salt_length); + } + + SHA1_Final(sha1, &sha); + b64encode(sha1, 20, target, target_length); + + return 0; +} + +KS_DECLARE(int) ks_dht_send_message_mutable_cjson(dht_handle_t *h, unsigned char *sk, unsigned char *pk, char **node_id, + char *message_id, int sequence, cJSON *message, ks_time_t life) +{ + struct bencode *body = ben_dict(); + char *output = 
NULL; + char *json = cJSON_PrintUnformatted(message); + int err = 0; + size_t output_len = 0; + + ben_dict_set(body, ben_blob("ct", 2), ben_blob("json", 4)); + ben_dict_set(body, ben_blob("b", 1), ben_blob(json, strlen(json))); + + output = (char *)ben_encode(&output_len, body); + + err = ks_dht_send_message_mutable(h, sk, pk, node_id, message_id, sequence, output, life); + free(json); + free(output); + ben_free(body); + + return err; +} + +KS_DECLARE(int) ks_dht_send_message_mutable(dht_handle_t *h, unsigned char *sk, unsigned char *pk, char **node_id, + char *message_id, int sequence, char *message, ks_time_t life) +{ + unsigned char target[40], signature[crypto_sign_BYTES]; + unsigned long long signature_length = crypto_sign_BYTES; + int message_length = strlen(message); + unsigned char tid[4]; + unsigned char *salt = (unsigned char *)message_id; + int salt_length = strlen(message_id); + struct ks_dht_store_entry_s *entry = NULL; + struct bencode *b_message = ben_blob(message, message_length); + struct bencode *args = NULL, *data = NULL; + char buf[1500]; + size_t buf_len = 0; + int err = 0; + h->now = ks_time_now_sec(); + + if ( !life ) { + /* Default to now plus 10 minutes */ + life = 600; + } + + make_tid(tid, "mm", 0); + + ks_dht_calculate_mutable_storage_target(pk, salt, salt_length, target, 40); + + if ( (entry = ks_dht_store_fetch(h->store, (char *)target)) ) { + if ( sequence < entry->serial ) { + sequence = entry->serial; + } + } + + + /* +int ks_dht_generate_mutable_storage_args(struct bencode *data, int64_t sequence, int cas, + unsigned char *id, int id_len, + const unsigned char *sk, const unsigned char *pk, + unsigned char *salt, unsigned long long salt_length, + unsigned char *token, unsigned long long token_length, + unsigned char *signature, unsigned long long *signature_length, + struct bencode **arguments) */ + + + err = ks_dht_generate_mutable_storage_args(b_message, sequence, 1, + h->myid, 20, + sk, pk, + salt, salt_length, + (unsigned char *) target, 40, + signature, &signature_length, + &args); + + if ( err ) { + return err; + } + + data = ben_dict(); + ben_dict_set(data, ben_blob("a", 1), args); + ben_dict_set(data, ben_blob("t", 1), ben_blob(tid, 4)); + ben_dict_set(data, ben_blob("y", 1), ben_blob("q", 1)); + ben_dict_set(data, ben_blob("q", 1), ben_blob("put", 3)); + + buf_len = ben_encode2(buf, 1500, data); + + err = ks_dht_store_entry_create(h, data, &entry, life, 1); + if ( err ) { + return err; + } + + ks_dht_store_replace(h->store, entry); + + /* dht_search() announce of this hash */ + dht_search(h, (const unsigned char *)entry->key, h->port, AF_INET, NULL, NULL); + + if ( node_id && node_id[0] ) { + /* We're being told where to send these messages. */ + int x = 0; + + for ( x = 0; node_id[x] != NULL; x++ ) { + unsigned char node_id_bin[20] = {0}; + struct node *n = NULL; + size_t size = 0; + + sodium_hex2bin(node_id_bin, 20, node_id[x], 40, ":", &size, NULL); + + n = find_node(h, node_id_bin, AF_INET); + + if ( !n ) { + n = find_node(h, node_id_bin, AF_INET6); + } + + if ( !n ) { + ks_log(KS_LOG_INFO, "Unable to find node with id\n"); + continue; + } + + err |= dht_send(h, buf, buf_len, 0, &n->ss); + } + } else { + /* Client api assumes that we'll figure out where to send the message. + We should find a bucket that resolves to the key, and send to all nodes in that bucket. 
+ */ + struct bucket *b4 = find_bucket(h, (const unsigned char *)entry->key, AF_INET); + struct bucket *b6 = find_bucket(h, (const unsigned char *)entry->key, AF_INET6); + struct node *n = NULL; + + if ( b4 ) { + for ( n = b4->nodes; n->next; n = n->next ) { + err |= dht_send(h, buf, buf_len, 0, &n->ss); + } + } + + if ( b6 ) { + for ( n = b6->nodes; n->next; n = n->next ) { + err |= dht_send(h, buf, buf_len, 0, &n->ss); + } + } + } + + return err; +} + +// KS_DECLARE(int) ks_dht_send_message_mutable( +KS_DECLARE(int) ks_dht_api_find_node(dht_handle_t *h, char *node_id_hex, char *target_hex, ks_bool_t ipv6) +{ + unsigned char node_id[20] = {0}, target[20] = {0}, tid[4] = {0}; + struct node *n = NULL; + size_t size = 0; + + if ( strlen(node_id_hex) != 40 || strlen(target_hex) != 40 ) { + ks_log(KS_LOG_INFO, "node_id(%s)[%d] and target(%s)[%d] must each be 40 hex characters\n", node_id_hex, strlen(node_id_hex), target_hex, strlen(target_hex)); + return 1; + } + + sodium_hex2bin(node_id, 20, node_id_hex, 40, ":", &size, NULL); + sodium_hex2bin(target, 20, target_hex, 40, ":", &size, NULL); + + n = find_node(h, node_id, ipv6 ? AF_INET6 : AF_INET); + + if ( !n ) { + ks_log(KS_LOG_INFO, "Unable to find node with id[%s]\n", node_id_hex); + return 1; + } + + make_tid(tid, "fn", 0); + + return send_find_node(h, &n->ss, tid, 4, target, sizeof(target), WANT4 | WANT6, n->reply_time >= h->now - 15); +} + + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_dso.c b/libs/libks/src/ks_dso.c new file mode 100755 index 0000000000..28d5ddf54f --- /dev/null +++ b/libs/libks/src/ks_dso.c @@ -0,0 +1,132 @@ +/* + * Cross Platform dso/dll load abstraction + * Copyright(C) 2008 Michael Jerris + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so. + * + * This work is provided under this license on an "as is" basis, without warranty of any kind, + * either expressed or implied, including, without limitation, warranties that the covered code + * is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire + * risk as to the quality and performance of the covered code is with you. Should any covered + * code prove defective in any respect, you (not the initial developer or any other contributor) + * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty + * constitutes an essential part of this license. No use of any covered code is authorized hereunder + * except under this disclaimer. 
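 *
 * A minimal usage sketch of the API defined below (library path, symbol name and
 * the function pointer type are illustrative only; err is heap-allocated on failure
 * and must be freed by the caller):
 *
 *     char *err = NULL;
 *     ks_dso_lib_t lib = ks_dso_open("libfoo.so", &err);
 *     if (lib) {
 *         void (*fn)(void) = (void (*)(void)) ks_dso_func_sym(lib, "foo_init", &err);
 *         if (fn) fn();
 *         ks_dso_destroy(&lib);
 *     }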
+ * + */ + +#include "ks.h" + +#ifdef WIN32 +#include +#include + +KS_DECLARE(ks_status_t) ks_dso_destroy(ks_dso_lib_t *lib) { + if (lib && *lib) { + FreeLibrary(*lib); + *lib = NULL; + } + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_dso_lib_t) ks_dso_open(const char *path, char **err) { + HINSTANCE lib; +#ifdef UNICODE + size_t len = strlen(path) + 1; + wchar_t *wpath = malloc(len); + + size_t converted; + mbstowcs_s(&converted, wpath, len, path, _TRUNCATE); +#else + char * wpath = path; +#endif + lib = LoadLibraryEx(wpath, NULL, 0); + + if (!lib) { + LoadLibraryEx(wpath, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + } + + if (!lib) { + DWORD error = GetLastError(); + char tmp[80]; + sprintf(tmp, "dll open error [%lu]\n", error); + *err = strdup(tmp); + } + +#ifdef UNICODE + free(wpath); +#endif + return lib; +} + +KS_DECLARE(void*) ks_dso_func_sym(ks_dso_lib_t lib, const char *sym, char **err) { + FARPROC func = GetProcAddress(lib, sym); + if (!func) { + DWORD error = GetLastError(); + char tmp[80]; + sprintf(tmp, "dll sym error [%lu]\n", error); + *err = strdup(tmp); + } + return (void *)(intptr_t)func; // this should really be addr - ks_dso_func_data +} + +#else + +/* +** {======================================================================== +** This is an implementation of loadlib based on the dlfcn interface. +** The dlfcn interface is available in Linux, SunOS, Solaris, IRIX, FreeBSD, +** NetBSD, AIX 4.2, HPUX 11, and probably most other Unix flavors, at least +** as an emulation layer on top of native functions. +** ========================================================================= +*/ + +#include + +KS_DECLARE(ks_status_t) ks_dso_destroy(ks_dso_lib_t *lib) { + int rc; + if (lib && *lib) { + rc = dlclose(*lib); + if (rc) { + //ks_log(KS_LOG_ERROR, "Failed to close lib %p: %s\n", *lib, dlerror()); + return KS_STATUS_FAIL; + } + //ks_log(KS_LOG_DEBUG, "lib %p was closed with success\n", *lib); + *lib = NULL; + return KS_STATUS_SUCCESS; + } + //ks_log(KS_LOG_ERROR, "Invalid pointer provided to ks_dso_destroy\n"); + return KS_STATUS_FAIL; +} + +KS_DECLARE(ks_dso_lib_t) ks_dso_open(const char *path, char **err) { + void *lib = dlopen(path, RTLD_NOW | RTLD_LOCAL); + if (lib == NULL) { + *err = strdup(dlerror()); + } + return lib; +} + +KS_DECLARE(void *) ks_dso_func_sym(ks_dso_lib_t lib, const char *sym, char **err) { + void *func = dlsym(lib, sym); + if (!func) { + *err = strdup(dlerror()); + } + return func; +} +#endif + +/* }====================================================== */ + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_hash.c b/libs/libks/src/ks_hash.c new file mode 100644 index 0000000000..b090e4d8cd --- /dev/null +++ b/libs/libks/src/ks_hash.c @@ -0,0 +1,648 @@ +/* + * Copyright (c) 2002, Christopher Clark + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ks.h" +#include "ks_hash.h" + +struct entry +{ + void *k, *v; + unsigned int h; + ks_hash_flag_t flags; + ks_hash_destructor_t destructor; + struct entry *next; +}; + +struct ks_hash_iterator { + unsigned int pos; + ks_locked_t locked; + struct entry *e; + struct ks_hash *h; +}; + +struct ks_hash { + ks_pool_t *pool; + unsigned int tablelength; + struct entry **table; + unsigned int entrycount; + unsigned int loadlimit; + unsigned int primeindex; + unsigned int (*hashfn) (void *k); + int (*eqfn) (void *k1, void *k2); + ks_hash_flag_t flags; + ks_hash_destructor_t destructor; + ks_rwl_t *rwl; + ks_mutex_t *mutex; + uint32_t readers; +}; + +/*****************************************************************************/ + +/*****************************************************************************/ +static inline unsigned int +hash(ks_hash_t *h, void *k) +{ + /* Aim to protect against poor hash functions by adding logic here + * - logic taken from java 1.4 ks_hash source */ + unsigned int i = h->hashfn(k); + i += ~(i << 9); + i ^= ((i >> 14) | (i << 18)); /* >>> */ + i += (i << 4); + i ^= ((i >> 10) | (i << 22)); /* >>> */ + return i; +} + + +/*****************************************************************************/ +/* indexFor */ +static __inline__ unsigned int +indexFor(unsigned int tablelength, unsigned int hashvalue) { + return (hashvalue % tablelength); +} + +/* Only works if tablelength == 2^N */ +/*static inline unsigned int + indexFor(unsigned int tablelength, unsigned int hashvalue) + { + return (hashvalue & (tablelength - 1u)); + } +*/ + +/*****************************************************************************/ +//#define freekey(X) free(X) + +/* + Credit for primes table: Aaron Krowne + http://br.endernet.org/~akrowne/ + http://planetmath.org/encyclopedia/GoodKs_HashPrimes.html +*/ +static const unsigned int primes[] = { + 53, 97, 193, 389, + 769, 1543, 3079, 6151, + 12289, 24593, 49157, 98317, + 196613, 393241, 786433, 1572869, + 3145739, 6291469, 12582917, 25165843, + 50331653, 100663319, 201326611, 402653189, + 805306457, 1610612741 +}; +const unsigned int prime_table_length = sizeof(primes)/sizeof(primes[0]); +const float max_load_factor = 0.65f; + +/*****************************************************************************/ + +static void ks_hash_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ + //ks_hash_t *hash = (ks_hash_t *) ptr; + + switch(action) { + case 
KS_MPCL_ANNOUNCE: + break; + case KS_MPCL_TEARDOWN: + break; + case KS_MPCL_DESTROY: + //ks_hash_destroy(&hash); + break; + } + +} + +KS_DECLARE(ks_status_t) ks_hash_create(ks_hash_t **hp, ks_hash_mode_t mode, ks_hash_flag_t flags, ks_pool_t *pool) +{ + return ks_hash_create_ex(hp, 16, NULL, NULL, mode, flags, NULL, pool); +} + +KS_DECLARE(void) ks_hash_set_flags(ks_hash_t *h, ks_hash_flag_t flags) +{ + h->flags = flags; +} + +KS_DECLARE(void) ks_hash_set_destructor(ks_hash_t *h, ks_hash_destructor_t destructor) +{ + h->destructor = destructor; +} + + +KS_DECLARE(ks_status_t) +ks_hash_create_ex(ks_hash_t **hp, unsigned int minsize, + unsigned int (*hashf) (void*), + int (*eqf) (void*,void*), ks_hash_mode_t mode, ks_hash_flag_t flags, ks_hash_destructor_t destructor, ks_pool_t *pool) +{ + ks_hash_t *h; + unsigned int pindex, size = primes[0]; + + switch(mode) { + case KS_HASH_MODE_CASE_INSENSITIVE: + ks_assert(hashf == NULL); + hashf = ks_hash_default_ci; + break; + case KS_HASH_MODE_INT: + ks_assert(hashf == NULL); + ks_assert(eqf == NULL); + hashf = ks_hash_default_int; + eqf = ks_hash_equalkeys_int; + break; + case KS_HASH_MODE_INT64: + ks_assert(hashf == NULL); + ks_assert(eqf == NULL); + hashf = ks_hash_default_int64; + eqf = ks_hash_equalkeys_int64; + break; + case KS_HASH_MODE_PTR: + ks_assert(hashf == NULL); + ks_assert(eqf == NULL); + hashf = ks_hash_default_ptr; + eqf = ks_hash_equalkeys_ptr; + break; + default: + break; + } + + if (flags == KS_HASH_FLAG_DEFAULT) { + flags = KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_RWLOCK; + } + + ks_assert(pool); + if (!hashf) hashf = ks_hash_default; + if (!eqf) eqf = ks_hash_equalkeys; + if (!minsize) minsize = 16; + + /* Check requested ks_hash isn't too large */ + if (minsize > (1u << 30)) {*hp = NULL; return KS_STATUS_FAIL;} + /* Enforce size as prime */ + for (pindex=0; pindex < prime_table_length; pindex++) { + if (primes[pindex] > minsize) { + size = primes[pindex]; + break; + } + } + + h = (ks_hash_t *) ks_pool_alloc(pool, sizeof(ks_hash_t)); + h->pool = pool; + h->flags = flags; + h->destructor = destructor; + + if ((flags & KS_HASH_FLAG_RWLOCK)) { + ks_rwl_create(&h->rwl, h->pool); + } + + ks_mutex_create(&h->mutex, KS_MUTEX_FLAG_DEFAULT, h->pool); + + + if (NULL == h) abort(); /*oom*/ + + h->table = (struct entry **)ks_pool_alloc(h->pool, sizeof(struct entry*) * size); + + if (NULL == h->table) abort(); /*oom*/ + + //memset(h->table, 0, size * sizeof(struct entry *)); + h->tablelength = size; + h->primeindex = pindex; + h->entrycount = 0; + h->hashfn = hashf; + h->eqfn = eqf; + h->loadlimit = (unsigned int) ceil(size * max_load_factor); + + *hp = h; + + ks_pool_set_cleanup(pool, h, NULL, 0, ks_hash_cleanup); + + return KS_STATUS_SUCCESS; +} + +/*****************************************************************************/ +static int +ks_hash_expand(ks_hash_t *h) +{ + /* Double the size of the table to accomodate more entries */ + struct entry **newtable; + struct entry *e; + struct entry **pE; + unsigned int newsize, i, index; + /* Check we're not hitting max capacity */ + if (h->primeindex == (prime_table_length - 1)) return 0; + newsize = primes[++(h->primeindex)]; + + newtable = (struct entry **)ks_pool_alloc(h->pool, sizeof(struct entry*) * newsize); + if (NULL != newtable) + { + memset(newtable, 0, newsize * sizeof(struct entry *)); + /* This algorithm is not 'stable'. ie. 
it reverses the list + * when it transfers entries between the tables */ + for (i = 0; i < h->tablelength; i++) { + while (NULL != (e = h->table[i])) { + h->table[i] = e->next; + index = indexFor(newsize,e->h); + e->next = newtable[index]; + newtable[index] = e; + } + } + ks_pool_safe_free(h->pool, h->table); + h->table = newtable; + } + /* Plan B: realloc instead */ + else + { + newtable = (struct entry **) + ks_pool_resize(h->pool, h->table, newsize * sizeof(struct entry *)); + if (NULL == newtable) { (h->primeindex)--; return 0; } + h->table = newtable; + memset(newtable[h->tablelength], 0, newsize - h->tablelength); + for (i = 0; i < h->tablelength; i++) { + for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) { + index = indexFor(newsize,e->h); + + if (index == i) { + pE = &(e->next); + } else { + *pE = e->next; + e->next = newtable[index]; + newtable[index] = e; + } + } + } + } + h->tablelength = newsize; + h->loadlimit = (unsigned int) ceil(newsize * max_load_factor); + return -1; +} + +/*****************************************************************************/ +KS_DECLARE(unsigned int) +ks_hash_count(ks_hash_t *h) +{ + return h->entrycount; +} + +static void * _ks_hash_remove(ks_hash_t *h, void *k, unsigned int hashvalue, unsigned int index) { + /* TODO: consider compacting the table when the load factor drops enough, + * or provide a 'compact' method. */ + + struct entry *e; + struct entry **pE; + void *v; + + + pE = &(h->table[index]); + e = *pE; + while (NULL != e) { + /* Check hash value to short circuit heavier comparison */ + if ((hashvalue == e->h) && (h->eqfn(k, e->k))) { + *pE = e->next; + h->entrycount--; + v = e->v; + if (e->flags & KS_HASH_FLAG_FREE_KEY) { + ks_pool_free(h->pool, e->k); + } + if (e->flags & KS_HASH_FLAG_FREE_VALUE) { + ks_pool_safe_free(h->pool, e->v); + v = NULL; + } else if (e->destructor) { + e->destructor(e->v); + v = e->v = NULL; + } else if (h->destructor) { + h->destructor(e->v); + v = e->v = NULL; + } + ks_pool_safe_free(h->pool, e); + return v; + } + pE = &(e->next); + e = e->next; + } + return NULL; +} + +/*****************************************************************************/ +KS_DECLARE(int) +ks_hash_insert_ex(ks_hash_t *h, void *k, void *v, ks_hash_flag_t flags, ks_hash_destructor_t destructor) +{ + struct entry *e; + unsigned int hashvalue = hash(h, k); + unsigned index = indexFor(h->tablelength, hashvalue); + + ks_hash_write_lock(h); + + if (!flags) { + flags = h->flags; + } + + if (flags & KS_HASH_FLAG_DUP_CHECK) { + _ks_hash_remove(h, k, hashvalue, index); + } + + if (++(h->entrycount) > h->loadlimit) + { + /* Ignore the return value. If expand fails, we should + * still try cramming just this value into the existing table + * -- we may not have memory for a larger table, but one more + * element may be ok. 
Next time we insert, we'll try expanding again.*/ + ks_hash_expand(h); + index = indexFor(h->tablelength, hashvalue); + } + e = (struct entry *)ks_pool_alloc(h->pool, sizeof(struct entry)); + if (NULL == e) { --(h->entrycount); return 0; } /*oom*/ + e->h = hashvalue; + e->k = k; + e->v = v; + e->flags = flags; + e->destructor = destructor; + e->next = h->table[index]; + h->table[index] = e; + + ks_hash_write_unlock(h); + + return -1; +} + + +KS_DECLARE(void) ks_hash_write_lock(ks_hash_t *h) +{ + if ((h->flags & KS_HASH_FLAG_RWLOCK)) { + ks_rwl_write_lock(h->rwl); + } else { + ks_mutex_lock(h->mutex); + } +} + +KS_DECLARE(void) ks_hash_write_unlock(ks_hash_t *h) +{ + if ((h->flags & KS_HASH_FLAG_RWLOCK)) { + ks_rwl_write_unlock(h->rwl); + } else { + ks_mutex_unlock(h->mutex); + } +} + +KS_DECLARE(ks_status_t) ks_hash_read_lock(ks_hash_t *h) +{ + if (!(h->flags & KS_HASH_FLAG_RWLOCK)) { + return KS_STATUS_INACTIVE; + } + + ks_rwl_read_lock(h->rwl); + + ks_mutex_lock(h->mutex); + h->readers++; + ks_mutex_unlock(h->mutex); + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_hash_read_unlock(ks_hash_t *h) +{ + if (!(h->flags & KS_HASH_FLAG_RWLOCK)) { + return KS_STATUS_INACTIVE; + } + + ks_mutex_lock(h->mutex); + h->readers--; + ks_mutex_unlock(h->mutex); + + ks_rwl_read_unlock(h->rwl); + + return KS_STATUS_SUCCESS; +} + +/*****************************************************************************/ +KS_DECLARE(void *) /* returns value associated with key */ +ks_hash_search(ks_hash_t *h, void *k, ks_locked_t locked) +{ + struct entry *e; + unsigned int hashvalue, index; + void *v = NULL; + + ks_assert(locked != KS_READLOCKED || (h->flags & KS_HASH_FLAG_RWLOCK)); + + hashvalue = hash(h,k); + index = indexFor(h->tablelength,hashvalue); + + if (locked == KS_READLOCKED) { + ks_rwl_read_lock(h->rwl); + + ks_mutex_lock(h->mutex); + h->readers++; + ks_mutex_unlock(h->mutex); + } + + e = h->table[index]; + while (NULL != e) { + /* Check hash value to short circuit heavier comparison */ + if ((hashvalue == e->h) && (h->eqfn(k, e->k))) { + v = e->v; + break; + } + e = e->next; + } + + return v; +} + +/*****************************************************************************/ +KS_DECLARE(void *) /* returns value associated with key */ +ks_hash_remove(ks_hash_t *h, void *k) +{ + void *v; + unsigned int hashvalue = hash(h,k); + + ks_hash_write_lock(h); + v = _ks_hash_remove(h, k, hashvalue, indexFor(h->tablelength,hashvalue)); + ks_hash_write_unlock(h); + + return v; +} + +/*****************************************************************************/ +/* destroy */ +KS_DECLARE(void) +ks_hash_destroy(ks_hash_t **h) +{ + unsigned int i; + struct entry *e, *f; + struct entry **table = (*h)->table; + ks_pool_t *pool; + + ks_hash_write_lock(*h); + + for (i = 0; i < (*h)->tablelength; i++) { + e = table[i]; + while (NULL != e) { + f = e; e = e->next; + + if (f->flags & KS_HASH_FLAG_FREE_KEY) { + ks_pool_free((*h)->pool, f->k); + } + + if (f->flags & KS_HASH_FLAG_FREE_VALUE) { + ks_pool_safe_free((*h)->pool, f->v); + } else if (f->destructor) { + f->destructor(f->v); + f->v = NULL; + } else if ((*h)->destructor) { + (*h)->destructor(f->v); + f->v = NULL; + } + ks_pool_safe_free((*h)->pool, f); + } + } + + pool = (*h)->pool; + ks_pool_safe_free(pool, (*h)->table); + ks_hash_write_unlock(*h); + if ((*h)->rwl) ks_pool_free(pool, (*h)->rwl); + ks_pool_free(pool, (*h)->mutex); + ks_pool_free(pool, *h); + pool = NULL; + *h = NULL; + + +} + +KS_DECLARE(void) ks_hash_last(ks_hash_iterator_t **iP) +{ + 
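	/* ks_hash_next() calls this automatically once the table is exhausted, releasing the
	   read lock taken by ks_hash_first(..., KS_READLOCKED); call it directly only when
	   abandoning an iteration early. A sketch of the intended pattern (hash contents and
	   names are illustrative):

	       ks_hash_iterator_t *it;
	       for (it = ks_hash_first(hash, KS_UNLOCKED); it; it = ks_hash_next(&it)) {
	           const void *key;
	           void *val;
	           ks_hash_this(it, &key, NULL, &val);
	       }
	*/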
ks_hash_iterator_t *i = *iP; + + //ks_assert(i->locked != KS_READLOCKED || (i->h->flags & KS_HASH_FLAG_RWLOCK)); + + if (i->locked == KS_READLOCKED) { + ks_mutex_lock(i->h->mutex); + i->h->readers--; + ks_mutex_unlock(i->h->mutex); + + ks_rwl_read_unlock(i->h->rwl); + } + + ks_pool_free(i->h->pool, i); + + *iP = NULL; +} + +KS_DECLARE(ks_hash_iterator_t *) ks_hash_next(ks_hash_iterator_t **iP) +{ + + ks_hash_iterator_t *i = *iP; + + if (i->e) { + if ((i->e = i->e->next) != 0) { + return i; + } else { + i->pos++; + } + } + + while(i->pos < i->h->tablelength && !i->h->table[i->pos]) { + i->pos++; + } + + if (i->pos >= i->h->tablelength) { + goto end; + } + + if ((i->e = i->h->table[i->pos]) != 0) { + return i; + } + + end: + + ks_hash_last(iP); + + return NULL; +} + +KS_DECLARE(ks_hash_iterator_t *) ks_hash_first(ks_hash_t *h, ks_locked_t locked) +{ + ks_hash_iterator_t *iterator; + + ks_assert(locked != KS_READLOCKED || (h->flags & KS_HASH_FLAG_RWLOCK)); + + iterator = ks_pool_alloc(h->pool, sizeof(*iterator)); + ks_assert(iterator); + + iterator->pos = 0; + iterator->e = NULL; + iterator->h = h; + + if (locked == KS_READLOCKED) { + ks_rwl_read_lock(h->rwl); + iterator->locked = locked; + ks_mutex_lock(h->mutex); + h->readers++; + ks_mutex_unlock(h->mutex); + } + + return ks_hash_next(&iterator); +} + +KS_DECLARE(void) ks_hash_this_val(ks_hash_iterator_t *i, void *val) +{ + if (i->e) { + i->e->v = val; + } +} + +KS_DECLARE(void) ks_hash_this(ks_hash_iterator_t *i, const void **key, ks_ssize_t *klen, void **val) +{ + if (i->e) { + if (key) { + *key = i->e->k; + } + if (klen) { + *klen = (int)strlen(i->e->k); + } + if (val) { + *val = i->e->v; + } + } else { + if (key) { + *key = NULL; + } + if (klen) { + *klen = 0; + } + if (val) { + *val = NULL; + } + } +} + + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ + diff --git a/libs/libks/src/ks_json.c b/libs/libks/src/ks_json.c index 4ee4a84ef6..30c73953a8 100644 --- a/libs/libks/src/ks_json.c +++ b/libs/libks/src/ks_json.c @@ -27,11 +27,11 @@ #include #include #include -#include #include #include -#include "ks_json.h" #include "ks.h" +#include "ks_json.h" +#include static const char *ep; @@ -40,7 +40,7 @@ KS_DECLARE(const char *)cJSON_GetErrorPtr() {return ep;} static int cJSON_strcasecmp(const char *s1,const char *s2) { if (!s1) return (s1==s2)?0:1;if (!s2) return 1; - for(; tolower(*s1) == tolower(*s2); ++s1, ++s2) if(*s1 == 0) return 0; + for(; tolower(*(const unsigned char *)s1) == tolower(*(const unsigned char *)s2); ++s1, ++s2) if(*s1 == 0) return 0; return tolower(*(const unsigned char *)s1) - tolower(*(const unsigned char *)s2); } @@ -59,23 +59,23 @@ static void (*cJSON_free)(void *ptr) = glue_free; static char* cJSON_strdup(const char* str) { - size_t len; - char* copy; - const char *s = str ? str : ""; + size_t len; + char* copy; + const char *s = str ? 
str : ""; - len = strlen(s) + 1; - if (!(copy = (char*)cJSON_malloc(len))) return 0; - memcpy(copy,s,len); - return copy; + len = strlen(s) + 1; + if (!(copy = (char*)cJSON_malloc(len))) return 0; + memcpy(copy,s,len); + return copy; } KS_DECLARE(void)cJSON_InitHooks(cJSON_Hooks* hooks) { - if (!hooks) { /* Reset hooks */ - cJSON_malloc = malloc; - cJSON_free = free; - return; - } + if (!hooks) { /* Reset hooks */ + cJSON_malloc = malloc; + cJSON_free = free; + return; + } cJSON_malloc = (hooks->malloc_fn)?hooks->malloc_fn:malloc; cJSON_free = (hooks->free_fn)?hooks->free_fn:free; @@ -281,8 +281,8 @@ KS_DECLARE(cJSON *)cJSON_Parse(const char *value) } /* Render a cJSON item/entity/structure to text. */ -KS_DECLARE(char *) cJSON_Print(cJSON *item) {return print_value(item,0,1);} -KS_DECLARE(char *) cJSON_PrintUnformatted(cJSON *item) {return print_value(item,0,0);} +KS_DECLARE(char *)cJSON_Print(cJSON *item) {return print_value(item,0,1);} +KS_DECLARE(char *)cJSON_PrintUnformatted(cJSON *item) {return print_value(item,0,0);} /* Parser core - when encountering text, process appropriately. */ static const char *parse_value(cJSON *item,const char *value) @@ -349,7 +349,8 @@ static const char *parse_array(cJSON *item,const char *value) static char *print_array(cJSON *item,int depth,int fmt) { char **entries; - char *out=0,*ptr,*ret;int len=5; + char *out=0,*ptr,*ret; + size_t len=5; cJSON *child=item->child; int numentries=0,i=0,fail=0; @@ -436,7 +437,8 @@ static const char *parse_object(cJSON *item,const char *value) static char *print_object(cJSON *item,int depth,int fmt) { char **entries=0,**names=0; - char *out=0,*ptr,*ret,*str;int len=7,i=0,j; + char *out=0,*ptr,*ret,*str;int i=0,j; + size_t len=7; cJSON *child=item->child; int numentries=0,fail=0; /* Count the number of entries. */ @@ -493,7 +495,20 @@ static char *print_object(cJSON *item,int depth,int fmt) /* Get Array size/item / object item. */ KS_DECLARE(int) cJSON_GetArraySize(cJSON *array) {cJSON *c=array->child;int i=0;while(c)i++,c=c->next;return i;} KS_DECLARE(cJSON *)cJSON_GetArrayItem(cJSON *array,int item) {cJSON *c=array->child; while (c && item>0) item--,c=c->next; return c;} -KS_DECLARE(cJSON *)cJSON_GetObjectItem(cJSON *object,const char *string) {cJSON *c=object->child; while (c && cJSON_strcasecmp(c->string,string)) c=c->next; return c;} +KS_DECLARE(cJSON *)cJSON_GetObjectItem(const cJSON *object,const char *string) {cJSON *c=object->child; while (c && cJSON_strcasecmp(c->string,string)) c=c->next; return c;} + + +KS_DECLARE(const char *)cJSON_GetObjectCstr(const cJSON *object, const char *string) +{ + cJSON *cj = cJSON_GetObjectItem(object, string); + + if (!cj || cj->type != cJSON_String || !cj->valuestring) return NULL; + + return cj->valuestring; +} + + + /* Utility for array list handling. 
*/ static void suffix_object(cJSON *prev,cJSON *item) {prev->next=item;item->prev=prev;} @@ -529,7 +544,68 @@ KS_DECLARE(cJSON *)cJSON_CreateArray() {cJSON *item=cJSON_New_Item();if(ite KS_DECLARE(cJSON *)cJSON_CreateObject() {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Object;return item;} /* Create Arrays: */ -KS_DECLARE(cJSON *)cJSON_CreateIntArray(int *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a!=0 && ichild=n;else suffix_object(p,n);p=n;}return a;} -KS_DECLARE(cJSON *)cJSON_CreateFloatArray(float *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a!=0 && ichild=n;else suffix_object(p,n);p=n;}return a;} -KS_DECLARE(cJSON *)cJSON_CreateDoubleArray(double *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a!=0 && ichild=n;else suffix_object(p,n);p=n;}return a;} -KS_DECLARE(cJSON *)cJSON_CreateStringArray(const char **strings,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a!=0 && ichild=n;else suffix_object(p,n);p=n;}return a;} +KS_DECLARE(cJSON *)cJSON_CreateIntArray(int *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} +KS_DECLARE(cJSON *)cJSON_CreateFloatArray(float *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} +KS_DECLARE(cJSON *)cJSON_CreateDoubleArray(double *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} +KS_DECLARE(cJSON *)cJSON_CreateStringArray(const char **strings,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} + +/* Duplication */ +KS_DECLARE(cJSON *) cJSON_Duplicate(cJSON *item,int recurse) +{ + cJSON *newitem,*cptr,*nptr=0,*newchild; + /* Bail on bad ptr */ + if (!item) return 0; + /* Create new item */ + newitem=cJSON_New_Item(); + if (!newitem) return 0; + /* Copy over all vars */ + newitem->type=item->type&(~cJSON_IsReference),newitem->valueint=item->valueint,newitem->valuedouble=item->valuedouble; + if (item->valuestring) {newitem->valuestring=cJSON_strdup(item->valuestring); if (!newitem->valuestring) {cJSON_Delete(newitem);return 0;}} + if (item->string) {newitem->string=cJSON_strdup(item->string); if (!newitem->string) {cJSON_Delete(newitem);return 0;}} + /* If non-recursive, then we're done! */ + if (!recurse) return newitem; + /* Walk the ->next chain for the child. */ + cptr=item->child; + while (cptr) { + newchild=cJSON_Duplicate(cptr,1); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) {cJSON_Delete(newitem);return 0;} + if (nptr) {nptr->next=newchild,newchild->prev=nptr;nptr=newchild;} /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + else {newitem->child=newchild;nptr=newchild;} /* Set newitem->child and move to it */ + cptr=cptr->next; + } + return newitem; +} + + +KS_DECLARE(cJSON *) cJSON_CreateStringPrintf(const char *fmt, ...) 
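/* A small usage sketch (format string and arguments are illustrative; the returned
   item owns the formatted string and is released with cJSON_Delete()):

       cJSON *item = cJSON_CreateStringPrintf("%s:%d", host, port);
*/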
+{ + va_list ap; + char *str; + cJSON *item; + + va_start(ap, fmt); + str = ks_vmprintf(fmt, ap); + va_end(ap); + + if (!str) return NULL; + + if ((item = cJSON_New_Item())) { + item->type=cJSON_String; + item->valuestring = str; + } else { + free(str); + } + + return item; +} + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_log.c b/libs/libks/src/ks_log.c new file mode 100644 index 0000000000..a7c49f75ce --- /dev/null +++ b/libs/libks/src/ks_log.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +static void null_logger(const char *file, const char *func, int line, int level, const char *fmt, ...) +{ + if (file && func && line && level && fmt) { + return; + } + return; +} + + +static const char *LEVEL_NAMES[] = { + "EMERG", + "ALERT", + "CRIT", + "ERROR", + "WARNING", + "NOTICE", + "INFO", + "DEBUG", + NULL +}; + +static int ks_log_level = 7; + +static const char *cut_path(const char *in) +{ + const char *p, *ret = in; + char delims[] = "/\\"; + char *i; + + for (i = delims; *i; i++) { + p = in; + while ((p = strchr(p, *i)) != 0) { + ret = ++p; + } + } + return ret; +} + + +static void default_logger(const char *file, const char *func, int line, int level, const char *fmt, ...) 
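/* A sketch of how a consumer typically wires this up (level 7 maps to "DEBUG" in
   LEVEL_NAMES above; the message itself is illustrative):

       ks_global_set_default_logger(7);
       ks_log(KS_LOG_INFO, "dht thread starting\n");

   Passing NULL to ks_global_set_logger() reverts to the no-op null_logger. */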
+{ + const char *fp; + char *data; + va_list ap; + int ret; + + if (level < 0 || level > 7) { + level = 7; + } + if (level > ks_log_level) { + return; + } + + fp = cut_path(file); + + va_start(ap, fmt); + + ret = ks_vasprintf(&data, fmt, ap); + + if (ret != -1) { + fprintf(stderr, "[%s] %s:%d %s() %s", LEVEL_NAMES[level], fp, line, func, data); + free(data); + } + + va_end(ap); + +} + +ks_logger_t ks_log = null_logger; + +KS_DECLARE(void) ks_global_set_logger(ks_logger_t logger) +{ + if (logger) { + ks_log = logger; + } else { + ks_log = null_logger; + } +} + +KS_DECLARE(void) ks_global_set_default_logger(int level) +{ + if (level < 0 || level > 7) { + level = 7; + } + + ks_log = default_logger; + ks_log_level = level; +} diff --git a/libs/libks/src/ks_mutex.c b/libs/libks/src/ks_mutex.c new file mode 100644 index 0000000000..6c14f24958 --- /dev/null +++ b/libs/libks/src/ks_mutex.c @@ -0,0 +1,648 @@ +/* + * Cross Platform Thread/Mutex abstraction + * Copyright(C) 2007 Michael Jerris + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so. + * + * This work is provided under this license on an "as is" basis, without warranty of any kind, + * either expressed or implied, including, without limitation, warranties that the covered code + * is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire + * risk as to the quality and performance of the covered code is with you. Should any covered + * code prove defective in any respect, you (not the initial developer or any other contributor) + * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty + * constitutes an essential part of this license. No use of any covered code is authorized hereunder + * except under this disclaimer. + * + */ + +#ifdef WIN32 +/* required for TryEnterCriticalSection definition. 
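A hedged sketch of emitting a message through the ks_log pointer defined above; any convenience macro that fills in file/func/line automatically would live outside this file, so the call site passes them explicitly here.

#include <ks.h>

/* Level 7 is "DEBUG" in the LEVEL_NAMES table above; ks_log is a plain
 * function pointer, so the caller supplies the source location. */
static void log_example(int value)
{
	ks_global_set_default_logger(7);   /* send everything up to DEBUG to stderr */
	ks_log(__FILE__, __func__, __LINE__, 7, "value is %d\n", value);
}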
Must be defined before windows.h include */ +#define _WIN32_WINNT 0x0400 +#endif + +#include "ks.h" + +#ifdef WIN32 +#include +#else +#include +#endif + +typedef enum { + KS_MUTEX_TYPE_DEFAULT, + KS_MUTEX_TYPE_NON_RECURSIVE +} ks_mutex_type_t; + +struct ks_mutex { +#ifdef WIN32 + CRITICAL_SECTION mutex; + HANDLE handle; +#else + pthread_mutex_t mutex; +#endif + ks_pool_t * pool; + ks_mutex_type_t type; + uint8_t malloc; +}; + +static void ks_mutex_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ + ks_mutex_t *mutex = (ks_mutex_t *) ptr; + + switch(action) { + case KS_MPCL_ANNOUNCE: + break; + case KS_MPCL_TEARDOWN: + break; + case KS_MPCL_DESTROY: +#ifdef WIN32 + if (mutex->type == KS_MUTEX_TYPE_NON_RECURSIVE) { + CloseHandle(mutex->handle); + } else { + DeleteCriticalSection(&mutex->mutex); + } +#else + pthread_mutex_destroy(&mutex->mutex); +#endif + break; + } +} + +KS_DECLARE(ks_status_t) ks_mutex_destroy(ks_mutex_t **mutexP) +{ + ks_mutex_t *mutex; + + ks_assert(mutexP); + + mutex = *mutexP; + *mutexP = NULL; + + if (!mutex) return KS_STATUS_FAIL; + + if (mutex->malloc) { +#ifdef WIN32 + if (mutex->type == KS_MUTEX_TYPE_NON_RECURSIVE) { + CloseHandle(mutex->handle); + } else { + DeleteCriticalSection(&mutex->mutex); + } +#else + pthread_mutex_destroy(&mutex->mutex); +#endif + free(mutex); + } else { + ks_pool_free(mutex->pool, (void *)mutex); + } + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_mutex_create(ks_mutex_t **mutex, unsigned int flags, ks_pool_t *pool) +{ + ks_status_t status = KS_STATUS_FAIL; +#ifndef WIN32 + pthread_mutexattr_t attr; +#endif + ks_mutex_t *check = NULL; + + if (pool) { + if (!(check = (ks_mutex_t *) ks_pool_alloc(pool, sizeof(**mutex)))) { + goto done; + } + } else { + check = malloc(sizeof(**mutex)); + memset(check, 0, sizeof(**mutex)); + check->malloc = 1; + } + + check->pool = pool; + check->type = KS_MUTEX_TYPE_DEFAULT; + +#ifdef WIN32 + if (flags & KS_MUTEX_FLAG_NON_RECURSIVE) { + check->type = KS_MUTEX_TYPE_NON_RECURSIVE; + check->handle = CreateEvent(NULL, FALSE, TRUE, NULL); + } else { + InitializeCriticalSection(&check->mutex); + } +#else + if (flags & KS_MUTEX_FLAG_NON_RECURSIVE) { + if (pthread_mutex_init(&check->mutex, NULL)) + goto done; + + } else { + if (pthread_mutexattr_init(&attr)) + goto done; + + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) + goto fail; + + if (pthread_mutex_init(&check->mutex, &attr)) + goto fail; + } + + goto success; + + fail: + pthread_mutexattr_destroy(&attr); + goto done; + + success: +#endif + *mutex = check; + status = KS_STATUS_SUCCESS; + + if (pool) { + ks_pool_set_cleanup(pool, check, NULL, 0, ks_mutex_cleanup); + } + + done: + return status; +} + +KS_DECLARE(ks_status_t) ks_mutex_lock(ks_mutex_t *mutex) +{ +#ifdef WIN32 + if (mutex->type == KS_MUTEX_TYPE_NON_RECURSIVE) { + DWORD ret = WaitForSingleObject(mutex->handle, INFINITE); + if ((ret != WAIT_OBJECT_0) && (ret != WAIT_ABANDONED)) { + return KS_STATUS_FAIL; + } + } else { + EnterCriticalSection(&mutex->mutex); + } +#else + if (pthread_mutex_lock(&mutex->mutex)) + return KS_STATUS_FAIL; +#endif + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_mutex_trylock(ks_mutex_t *mutex) +{ +#ifdef WIN32 + if (mutex->type == KS_MUTEX_TYPE_NON_RECURSIVE) { + DWORD ret = WaitForSingleObject(mutex->handle, 0); + if ((ret != WAIT_OBJECT_0) && (ret != WAIT_ABANDONED)) { + return KS_STATUS_FAIL; + } + } else { + if (!TryEnterCriticalSection(&mutex->mutex)) 
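A usage sketch for the mutex API above; passing a NULL pool takes the malloc path shown in ks_mutex_create(), so the example stays self-contained.

#include <ks.h>

static ks_status_t mutex_example(void)
{
	ks_mutex_t *mutex = NULL;

	/* NULL pool: the mutex is heap-allocated and released by ks_mutex_destroy() */
	if (ks_mutex_create(&mutex, KS_MUTEX_FLAG_DEFAULT, NULL) != KS_STATUS_SUCCESS) {
		return KS_STATUS_FAIL;
	}

	ks_mutex_lock(mutex);
	/* ... touch shared state ... */
	ks_mutex_unlock(mutex);

	if (ks_mutex_trylock(mutex) == KS_STATUS_SUCCESS) {   /* non-blocking variant */
		ks_mutex_unlock(mutex);
	}

	return ks_mutex_destroy(&mutex);
}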
+ return KS_STATUS_FAIL; + } +#else + if (pthread_mutex_trylock(&mutex->mutex)) + return KS_STATUS_FAIL; +#endif + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_mutex_unlock(ks_mutex_t *mutex) +{ +#ifdef WIN32 + if (mutex->type == KS_MUTEX_TYPE_NON_RECURSIVE) { + if (!SetEvent(mutex->handle)) { + return KS_STATUS_FAIL; + } + } else { + LeaveCriticalSection(&mutex->mutex); + } +#else + if (pthread_mutex_unlock(&mutex->mutex)) + return KS_STATUS_FAIL; +#endif + return KS_STATUS_SUCCESS; +} + + + +struct ks_cond { + ks_pool_t * pool; + ks_mutex_t *mutex; +#ifdef WIN32 + CONDITION_VARIABLE cond; +#else + pthread_cond_t cond; +#endif + uint8_t static_mutex; +}; + +static void ks_cond_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ + ks_cond_t *cond = (ks_cond_t *) ptr; + + switch(action) { + case KS_MPCL_ANNOUNCE: + break; + case KS_MPCL_TEARDOWN: + break; + case KS_MPCL_DESTROY: + if (!cond->static_mutex) { + ks_mutex_destroy(&cond->mutex); + } +#ifndef WIN32 + pthread_cond_destroy(&cond->cond); +#endif + break; + } +} + +KS_DECLARE(ks_status_t) ks_cond_create_ex(ks_cond_t **cond, ks_pool_t *pool, ks_mutex_t *mutex) +{ + ks_status_t status = KS_STATUS_FAIL; + ks_cond_t *check = NULL; + + *cond = NULL; + + if (!pool) + goto done; + + if (!(check = (ks_cond_t *) ks_pool_alloc(pool, sizeof(**cond)))) { + goto done; + } + + check->pool = pool; + if (mutex) { + check->mutex = mutex; + check->static_mutex = 1; + } else { + if (ks_mutex_create(&check->mutex, KS_MUTEX_FLAG_DEFAULT, pool) != KS_STATUS_SUCCESS) { + goto done; + } + } + +#ifdef WIN32 + InitializeConditionVariable(&check->cond); +#else + if (pthread_cond_init(&check->cond, NULL)) { + if (!check->static_mutex) { + ks_mutex_destroy(&check->mutex); + } + goto done; + } +#endif + + *cond = check; + status = KS_STATUS_SUCCESS; + ks_pool_set_cleanup(pool, check, NULL, 0, ks_cond_cleanup); + + done: + return status; +} + +KS_DECLARE(ks_mutex_t *) ks_cond_get_mutex(ks_cond_t *cond) +{ + return cond->mutex; +} + +KS_DECLARE(ks_status_t) ks_cond_create(ks_cond_t **cond, ks_pool_t *pool) +{ + return ks_cond_create_ex(cond, pool, NULL); +} + +KS_DECLARE(ks_status_t) ks_cond_lock(ks_cond_t *cond) +{ + return ks_mutex_lock(cond->mutex); +} + +KS_DECLARE(ks_status_t) ks_cond_trylock(ks_cond_t *cond) +{ + return ks_mutex_trylock(cond->mutex); +} + +KS_DECLARE(ks_status_t) ks_cond_unlock(ks_cond_t *cond) +{ + return ks_mutex_unlock(cond->mutex); +} + +KS_DECLARE(ks_status_t) ks_cond_signal(ks_cond_t *cond) +{ + ks_cond_lock(cond); +#ifdef WIN32 + WakeConditionVariable(&cond->cond); +#else + pthread_cond_signal(&cond->cond); +#endif + ks_cond_unlock(cond); + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_cond_broadcast(ks_cond_t *cond) +{ + ks_cond_lock(cond); +#ifdef WIN32 + WakeAllConditionVariable(&cond->cond); +#else + pthread_cond_broadcast(&cond->cond); +#endif + ks_cond_unlock(cond); + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_cond_try_signal(ks_cond_t *cond) +{ + if (ks_cond_trylock(cond) != KS_STATUS_SUCCESS) { + return KS_STATUS_FAIL; + } +#ifdef WIN32 + WakeConditionVariable(&cond->cond); +#else + pthread_cond_signal(&cond->cond); +#endif + ks_cond_unlock(cond); + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_cond_try_broadcast(ks_cond_t *cond) +{ + if (ks_cond_trylock(cond) != KS_STATUS_SUCCESS) { + return KS_STATUS_FAIL; + } +#ifdef WIN32 + WakeAllConditionVariable(&cond->cond); +#else + 
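The condition-variable wrappers above follow the usual wait/signal pattern; a minimal sketch, where the `ready` flag stands in for whatever shared state the waiter is blocked on.

#include <ks.h>

static int ready = 0;   /* illustrative shared state, protected by the cond's mutex */

static void waiter(ks_cond_t *cond)
{
	ks_cond_lock(cond);
	while (!ready) {            /* re-check the predicate after every wakeup */
		ks_cond_wait(cond);
	}
	ks_cond_unlock(cond);
}

static void notifier(ks_cond_t *cond)
{
	ks_cond_lock(cond);
	ready = 1;
	ks_cond_unlock(cond);
	ks_cond_signal(cond);       /* ks_cond_signal() acquires the lock internally */
}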
pthread_cond_broadcast(&cond->cond); +#endif + ks_cond_unlock(cond); + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_cond_wait(ks_cond_t *cond) +{ +#ifdef WIN32 + SleepConditionVariableCS(&cond->cond, &cond->mutex->mutex, INFINITE); +#else + pthread_cond_wait(&cond->cond, &cond->mutex->mutex); +#endif + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_cond_timedwait(ks_cond_t *cond, ks_time_t ms) +{ +#ifdef WIN32 + if(!SleepConditionVariableCS(&cond->cond, &cond->mutex->mutex, (DWORD)ms)) { + if (GetLastError() == ERROR_TIMEOUT) { + return KS_STATUS_TIMEOUT; + } else { + return KS_STATUS_FAIL; + } + } +#else + struct timespec ts; + ks_time_t n = ks_time_now() + (ms * 1000); + ts.tv_sec = ks_time_sec(n); + ts.tv_nsec = ks_time_nsec(n); + if (pthread_cond_timedwait(&cond->cond, &cond->mutex->mutex, &ts)) { + switch(errno) { + case ETIMEDOUT: + return KS_STATUS_TIMEOUT; + default: + return KS_STATUS_FAIL; + } + } +#endif + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_cond_destroy(ks_cond_t **cond) +{ + ks_cond_t *condp = *cond; + + if (!condp) { + return KS_STATUS_FAIL; + } + + *cond = NULL; + + return ks_pool_free(condp->pool, condp); +} + + +struct ks_rwl { +#ifdef WIN32 + SRWLOCK rwlock; + ks_hash_t *read_lock_list; +#else + pthread_rwlock_t rwlock; +#endif + ks_pool_t *pool; + ks_thread_os_handle_t write_locker; + uint32_t wlc; +}; + +static void ks_rwl_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ +#ifndef WIN32 + ks_rwl_t *rwlock = (ks_rwl_t *) ptr; +#endif + + switch(action) { + case KS_MPCL_ANNOUNCE: + break; + case KS_MPCL_TEARDOWN: + break; + case KS_MPCL_DESTROY: +#ifndef WIN32 + pthread_rwlock_destroy(&rwlock->rwlock); +#endif + break; + } +} + +KS_DECLARE(ks_status_t) ks_rwl_create(ks_rwl_t **rwlock, ks_pool_t *pool) +{ + ks_status_t status = KS_STATUS_FAIL; + ks_rwl_t *check = NULL; + *rwlock = NULL; + + if (!pool) { + goto done; + } + + if (!(check = (ks_rwl_t *) ks_pool_alloc(pool, sizeof(**rwlock)))) { + goto done; + } + + check->pool = pool; + +#ifdef WIN32 + + if (ks_hash_create(&check->read_lock_list, KS_HASH_MODE_PTR, KS_HASH_FLAG_NONE, pool) != KS_STATUS_SUCCESS) { + goto done; + } + + InitializeSRWLock(&check->rwlock); +#else + if ((pthread_rwlock_init(&check->rwlock, NULL))) { + goto done; + } +#endif + + *rwlock = check; + status = KS_STATUS_SUCCESS; + ks_pool_set_cleanup(pool, check, NULL, 0, ks_rwl_cleanup); + done: + return status; +} + +KS_DECLARE(ks_status_t) ks_rwl_read_lock(ks_rwl_t *rwlock) +{ +#ifdef WIN32 + int count = (int)(intptr_t)ks_hash_remove(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self()); + + if (count) { + ks_hash_insert(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self(), (void *)(intptr_t)++count); + return KS_STATUS_SUCCESS; + } + + + AcquireSRWLockShared(&rwlock->rwlock); +#else + pthread_rwlock_rdlock(&rwlock->rwlock); +#endif + +#ifdef WIN32 + ks_hash_insert(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self(), (void *)(intptr_t)(int)1); +#endif + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_rwl_write_lock(ks_rwl_t *rwlock) +{ + + int me = (rwlock->write_locker == ks_thread_self()); + + if (me) { + rwlock->wlc++; + return KS_STATUS_SUCCESS; + } + +#ifdef WIN32 + AcquireSRWLockExclusive(&rwlock->rwlock); +#else + pthread_rwlock_wrlock(&rwlock->rwlock); +#endif + rwlock->write_locker = ks_thread_self(); + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) 
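A sketch of shared versus exclusive access with the ks_rwl_t API above (the unlock and destroy counterparts appear just below).

#include <ks.h>

static void rwl_example(ks_pool_t *pool, int *shared_counter)
{
	ks_rwl_t *rwlock = NULL;
	int snapshot;

	if (ks_rwl_create(&rwlock, pool) != KS_STATUS_SUCCESS) {
		return;
	}

	/* any number of readers may hold the lock concurrently */
	ks_rwl_read_lock(rwlock);
	snapshot = *shared_counter;
	ks_rwl_read_unlock(rwlock);
	(void) snapshot;

	/* the write side is exclusive and re-entrant for the owning thread */
	ks_rwl_write_lock(rwlock);
	(*shared_counter)++;
	ks_rwl_write_unlock(rwlock);

	ks_rwl_destroy(&rwlock);
}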
ks_rwl_try_read_lock(ks_rwl_t *rwlock) +{ +#ifdef WIN32 + int count = (int)(intptr_t)ks_hash_remove(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self()); + + if (count) { + ks_hash_insert(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self(), (void *)(intptr_t)++count); + return KS_STATUS_SUCCESS; + } + + if (!TryAcquireSRWLockShared(&rwlock->rwlock)) { + return KS_STATUS_FAIL; + } +#else + if (pthread_rwlock_tryrdlock(&rwlock->rwlock)) { + return KS_STATUS_FAIL; + } +#endif + +#ifdef WIN32 + ks_hash_insert(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self(), (void *)(intptr_t)(int)1); +#endif + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_rwl_try_write_lock(ks_rwl_t *rwlock) +{ + int me = (rwlock->write_locker == ks_thread_self()); + + if (me) { + rwlock->wlc++; + return KS_STATUS_SUCCESS; + } + +#ifdef WIN32 + if (!TryAcquireSRWLockExclusive(&rwlock->rwlock)) { + return KS_STATUS_FAIL; + } +#else + if (pthread_rwlock_trywrlock(&rwlock->rwlock)) { + return KS_STATUS_FAIL; + } +#endif + + rwlock->write_locker = ks_thread_self(); + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_rwl_read_unlock(ks_rwl_t *rwlock) +{ +#ifdef WIN32 + int count = (int)(intptr_t)ks_hash_remove(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self()); + + if (count > 1) { + ks_hash_insert(rwlock->read_lock_list, (void *)(intptr_t)ks_thread_self(), (void *)(intptr_t)--count); + return KS_STATUS_SUCCESS; + } + + ReleaseSRWLockShared(&rwlock->rwlock); +#else + pthread_rwlock_unlock(&rwlock->rwlock); +#endif + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_rwl_write_unlock(ks_rwl_t *rwlock) +{ + int me = (rwlock->write_locker == ks_thread_self()); + + if (me && rwlock->wlc > 0) { + rwlock->wlc--; + return KS_STATUS_SUCCESS; + } + + rwlock->write_locker = 0; + +#ifdef WIN32 + ReleaseSRWLockExclusive(&rwlock->rwlock); +#else + pthread_rwlock_unlock(&rwlock->rwlock); +#endif + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_rwl_destroy(ks_rwl_t **rwlock) +{ + ks_rwl_t *rwlockp = *rwlock; + + + if (!rwlockp) { + return KS_STATUS_FAIL; + } + + *rwlock = NULL; + + return ks_pool_free(rwlockp->pool, rwlockp); +} + + + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_pool.c b/libs/libks/src/ks_pool.c new file mode 100644 index 0000000000..2a6750e91d --- /dev/null +++ b/libs/libks/src/ks_pool.c @@ -0,0 +1,2171 @@ +/* + * Memory pool routines. + * + * Copyright 1996 by Gray Watson. + * + * This file is part of the ks_mpool package. + * + * Permission to use, copy, modify, and distribute this software for + * any purpose and without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies, and that the name of Gray Watson not be used in advertising + * or publicity pertaining to distribution of the document or software + * without specific, written prior permission. + * + * Gray Watson makes no representations about the suitability of the + * software described herein for any purpose. It is provided "as is" + * without express or implied warranty. + * + * The author may be reached via http://256.com/gray/ + * + * $Id: ks_mpool.c,v 1.5 2006/05/31 20:28:31 gray Exp $ + */ + +/* + * Memory-pool allocation routines. 
I got sick of the GNU mmalloc + * library which was close to what we needed but did not exactly do + * what I wanted. + * + * The following uses mmap from /dev/zero. It allows a number of + * allocations to be made inside of a memory pool then with a clear or + * close the pool can be reset without any memory fragmentation and + * growth problems. + */ + +#include "ks.h" +#include + +#define KS_POOL_MAGIC 0xABACABA /* magic for struct */ +#define BLOCK_MAGIC 0xB1B1007 /* magic for blocks */ +#define FENCE_MAGIC0 (unsigned char)(0xFAU) /* 1st magic mem byte */ +#define FENCE_MAGIC1 (unsigned char)(0xD3U) /* 2nd magic mem byte */ + +#define FENCE_SIZE 2 /* fence space */ +#define MIN_ALLOCATION (sizeof(ks_pool_free_t)) /* min alloc */ +#define MAX_FREE_SEARCH 10240 /* max size to search */ +#define MAX_FREE_LIST_SEARCH 100 /* max looking for free mem */ + +#define PRE_MAGIC1 0x33U +#define PRE_MAGIC2 0xCCU + +typedef struct alloc_prefix_s { + unsigned char m1; + unsigned long size; + unsigned char m2; +} alloc_prefix_t; + +#define PREFIX_SIZE sizeof(struct alloc_prefix_s) + +/* + * bitflag tools for Variable and a Flag + */ +#define BIT_FLAG(x) (1 << (x)) +#define BIT_SET(v,f) (v) |= (f) +#define BIT_CLEAR(v,f) (v) &= ~(f) +#define BIT_IS_SET(v,f) ((v) & (f)) +#define BIT_TOGGLE(v,f) (v) ^= (f) + +#define SET_POINTER(pnt, val) \ + do { \ + if ((pnt) != NULL) { \ + (*(pnt)) = (val); \ + } \ + } while(0) + +#define BLOCK_FLAG_USED BIT_FLAG(0) /* block is used */ +#define BLOCK_FLAG_FREE BIT_FLAG(1) /* block is free */ + +#define DEFAULT_PAGE_MULT 16 /* pagesize = this * getpagesize */ + +/* How many pages SIZE bytes resides in. We add in the block header. */ +#define PAGES_IN_SIZE(mp_p, size) (((size) + sizeof(ks_pool_block_t) + \ + (mp_p)->mp_page_size - 1) / \ + (mp_p)->mp_page_size) +#define SIZE_OF_PAGES(mp_p, page_n) ((page_n) * (mp_p)->mp_page_size) +#define MAX_BITS 30 /* we only can allocate 1gb chunks */ + +#define MAX_BLOCK_USER_MEMORY(mp_p) ((mp_p)->mp_page_size - \ + sizeof(ks_pool_block_t)) +#define FIRST_ADDR_IN_BLOCK(block_p) (void *)((char *)(block_p) + \ + sizeof(ks_pool_block_t)) +#define MEMORY_IN_BLOCK(block_p) ((char *)(block_p)->mb_bounds_p - \ + ((char *)(block_p) + \ + sizeof(ks_pool_block_t))) + +typedef struct ks_pool_cleanup_node_s { + ks_pool_cleanup_fn_t fn; + void *ptr; + void *arg; + int type; + struct ks_pool_cleanup_node_s *next; +} ks_pool_cleanup_node_t; + +struct ks_pool_s { + unsigned int mp_magic; /* magic number for struct */ + unsigned int mp_flags; /* flags for the struct */ + unsigned long mp_alloc_c; /* number of allocations */ + unsigned long mp_user_alloc; /* user bytes allocated */ + unsigned long mp_max_alloc; /* maximum user bytes allocated */ + unsigned int mp_page_c; /* number of pages allocated */ + unsigned int mp_max_pages; /* maximum number of pages to use */ + unsigned int mp_page_size; /* page-size of our system */ + off_t mp_top; /* top of our allocations in fd */ + ks_pool_log_func_t mp_log_func; /* log callback function */ + void *mp_addr; /* current address for mmaping */ + void *mp_min_p; /* min address in pool for checks */ + void *mp_bounds_p; /* max address in pool for checks */ + struct ks_pool_block_st *mp_first_p; /* first memory block we are using */ + struct ks_pool_block_st *mp_last_p; /* last memory block we are using */ + struct ks_pool_block_st *mp_free[MAX_BITS + 1]; /* free lists based on size */ + unsigned int mp_magic2; /* upper magic for overwrite sanity */ + ks_pool_cleanup_node_t *clfn_list; + ks_mutex_t *mutex; + 
ks_mutex_t *cleanup_mutex; + uint8_t cleaning_up; +}; + +/* for debuggers to be able to interrogate the generic type in the .h file */ +typedef ks_pool_t ks_pool_ext_t; + +/* + * Block header structure. This structure *MUST* be long-word + * aligned. + */ +typedef struct ks_pool_block_st { + unsigned int mb_magic; /* magic number for block header */ + void *mb_bounds_p; /* block boundary location */ + struct ks_pool_block_st *mb_next_p; /* linked list next pointer */ + unsigned int mb_magic2; /* upper magic for overwrite sanity */ +} ks_pool_block_t; + +/* + * Free list structure. + */ +typedef struct { + void *mf_next_p; /* pointer to the next free address */ + unsigned long mf_size; /* size of the free block */ +} ks_pool_free_t; + +#ifndef MAP_ANON +#define MAP_ANON MAP_ANONYMOUS +#endif + +/* local variables */ +static int enabled_b = 0; /* lib initialized? */ +static unsigned int min_bit_free_next = 0; /* min size of next pnt */ +static unsigned int min_bit_free_size = 0; /* min size of next + size */ +static unsigned long bit_array[MAX_BITS + 1]; /* size -> bit */ + +#ifdef _MSC_VER +#include +long getpagesize(void) +{ + static long g_pagesize = 0; + if (!g_pagesize) { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + g_pagesize = system_info.dwPageSize; + } + return g_pagesize; +} +#endif + + +/* We need mutex here probably or this notion of cleanup stuff cannot be threadsafe */ + +#if 0 +static ks_pool_cleanup_node_t *find_cleanup_node(ks_pool_t *mp_p, void *ptr) +{ + ks_pool_cleanup_node_t *np, *cnode = NULL; + + ks_assert(mp_p); + ks_assert(ptr); + + for (np = mp_p->clfn_list; np; np = np->next) { + if (np->ptr == ptr) { + cnode = np; + goto end; + } + } + + end: + + /* done, the nodes are all from the pool so they will be destroyed */ + return cnode; +} +#endif + +static void perform_pool_cleanup_on_free(ks_pool_t *mp_p, void *ptr) +{ + ks_pool_cleanup_node_t *np, *cnode, *last = NULL; + + np = mp_p->clfn_list; + + ks_mutex_lock(mp_p->mutex); + if (mp_p->cleaning_up) { + ks_mutex_unlock(mp_p->mutex); + return; + } + ks_mutex_unlock(mp_p->mutex); + + ks_mutex_lock(mp_p->cleanup_mutex); + while(np) { + if (np->ptr == ptr) { + if (last) { + last->next = np->next; + } else { + mp_p->clfn_list = np->next; + } + + cnode = np; + np = np->next; + cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_ANNOUNCE, KS_MPCL_FREE); + cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_TEARDOWN, KS_MPCL_FREE); + cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_DESTROY, KS_MPCL_FREE); + + continue; + } + last = np; + np = np->next; + } + ks_mutex_unlock(mp_p->cleanup_mutex); +} + +static void perform_pool_cleanup(ks_pool_t *mp_p) +{ + ks_pool_cleanup_node_t *np; + + ks_mutex_lock(mp_p->mutex); + if (mp_p->cleaning_up) { + ks_mutex_unlock(mp_p->mutex); + return; + } + mp_p->cleaning_up = 1; + ks_mutex_unlock(mp_p->mutex); + + ks_mutex_lock(mp_p->cleanup_mutex); + for (np = mp_p->clfn_list; np; np = np->next) { + np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE); + } + + for (np = mp_p->clfn_list; np; np = np->next) { + np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE); + } + + for (np = mp_p->clfn_list; np; np = np->next) { + np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE); + } + ks_mutex_unlock(mp_p->cleanup_mutex); + + mp_p->clfn_list = NULL; +} + +KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *mp_p, void *ptr, void *arg, int type, ks_pool_cleanup_fn_t fn) +{ + 
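A sketch of a pool cleanup callback in the style of ks_mutex_cleanup() earlier in the patch; my_res_t, my_res_open() and the fopen()-backed resource are purely illustrative.

#include <stdio.h>
#include <ks.h>

typedef struct my_res_s { FILE *fp; } my_res_t;   /* hypothetical resource type */

static void my_res_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
{
	my_res_t *res = (my_res_t *) ptr;

	switch (action) {
	case KS_MPCL_ANNOUNCE:
	case KS_MPCL_TEARDOWN:
		break;
	case KS_MPCL_DESTROY:
		if (res->fp) fclose(res->fp);   /* release the external handle with the allocation */
		break;
	}
}

static my_res_t *my_res_open(ks_pool_t *pool, const char *path)
{
	my_res_t *res = (my_res_t *) ks_pool_alloc(pool, sizeof(*res));

	if (!res) return NULL;
	res->fp = fopen(path, "r");
	ks_pool_set_cleanup(pool, res, NULL, 0, my_res_cleanup);   /* fires on free, clear or close */
	return res;
}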
ks_pool_cleanup_node_t *cnode; + + ks_assert(mp_p); + ks_assert(ptr); + ks_assert(fn); + + /* don't set cleanup on this cnode obj or it will be an endless loop */ + cnode = (ks_pool_cleanup_node_t *) ks_pool_alloc(mp_p, sizeof(*cnode)); + + if (!cnode) { + return KS_STATUS_FAIL; + } + + cnode->ptr = ptr; + cnode->arg = arg; + cnode->fn = fn; + cnode->type = type; + + ks_mutex_lock(mp_p->cleanup_mutex); + cnode->next = mp_p->clfn_list; + mp_p->clfn_list = cnode; + ks_mutex_unlock(mp_p->cleanup_mutex); + + return KS_STATUS_SUCCESS; +} + + + +/****************************** local utilities ******************************/ + +/* + * static void startup + * + * DESCRIPTION: + * + * Perform any library level initialization. + * + * RETURNS: + * + * None. + * + * ARGUMENTS: + * + * None. + */ +static void startup(void) +{ + int bit_c; + unsigned long size = 1; + + if (enabled_b) { + return; + } + + /* allocate our free bit array list */ + for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) { + bit_array[bit_c] = size; + + /* + * Note our minimum number of bits that can store a pointer. This + * is smallest address that we can have a linked list for. + */ + if (min_bit_free_next == 0 && size >= sizeof(void *)) { + min_bit_free_next = bit_c; + } + /* + * Note our minimum number of bits that can store a pointer and + * the size of the block. + */ + if (min_bit_free_size == 0 && size >= sizeof(ks_pool_free_t)) { + min_bit_free_size = bit_c; + } + + size *= 2; + } + + enabled_b = 1; +} + +/* + * static int size_to_bits + * + * DESCRIPTION: + * + * Calculate the number of bits in a size. + * + * RETURNS: + * + * Number of bits. + * + * ARGUMENTS: + * + * size -> Size of memory of which to calculate the number of bits. + */ +static int size_to_bits(const unsigned long size) +{ + int bit_c = 0; + + for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) { + if (size <= bit_array[bit_c]) { + break; + } + } + + return bit_c; +} + +/* + * static int size_to_free_bits + * + * DESCRIPTION: + * + * Calculate the number of bits in a size going on the free list. + * + * RETURNS: + * + * Number of bits. + * + * ARGUMENTS: + * + * size -> Size of memory of which to calculate the number of bits. + */ +static int size_to_free_bits(const unsigned long size) +{ + int bit_c = 0; + + if (size == 0) { + return 0; + } + + for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) { + if (size < bit_array[bit_c]) { + break; + } + } + + return bit_c - 1; +} + +/* + * static int bits_to_size + * + * DESCRIPTION: + * + * Calculate the size represented by a number of bits. + * + * RETURNS: + * + * Number of bits. + * + * ARGUMENTS: + * + * bit_n -> Number of bits + */ +static unsigned long bits_to_size(const int bit_n) +{ + if (bit_n > MAX_BITS) { + return bit_array[MAX_BITS]; + } else { + return bit_array[bit_n]; + } +} + +/* + * static void *alloc_pages + * + * DESCRIPTION: + * + * Allocate space for a number of memory pages in the memory pool. + * + * RETURNS: + * + * Success - New pages of memory + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to our memory pool. + * + * page_n -> Number of pages to alloc. + * + * error_p <- Pointer to ks_status_t which, if not NULL, will be set with + * a ks_pool error code. + */ +static void *alloc_pages(ks_pool_t *mp_p, const unsigned int page_n, ks_status_t *error_p) +{ + void *mem; + unsigned long size; + int state; + + /* are we over our max-pages? 
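To make the power-of-two bucketing above concrete, a few sample values (assuming bit_array[n] == 2^n, which is what startup() builds):

/* size_to_bits() rounds a request up to its bucket, while size_to_free_bits()
 * rounds a freed chunk down so it is never advertised as larger than it is:
 *
 *   size_to_bits(1)       == 0      bits_to_size(0) == 1
 *   size_to_bits(10)      == 4      (10 fits in the 16-byte bucket)
 *   size_to_free_bits(10) == 3      (a freed 10-byte chunk only guarantees 8 usable bytes)
 *   size_to_bits(1024)    == 10
 */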
*/ + if (mp_p->mp_max_pages > 0 && mp_p->mp_page_c >= mp_p->mp_max_pages) { + SET_POINTER(error_p, KS_STATUS_NO_PAGES); + return NULL; + } + + size = SIZE_OF_PAGES(mp_p, page_n); + +#ifdef DEBUG + (void) printf("allocating %u pages or %lu bytes\n", page_n, size); +#endif + + + state = MAP_PRIVATE | MAP_ANON; + +#if defined(MAP_FILE) + state |= MAP_FILE; +#endif + +#if defined(MAP_VARIABLE) + state |= MAP_VARIABLE; +#endif + + /* mmap from /dev/zero */ + mem = mmap(mp_p->mp_addr, size, PROT_READ | PROT_WRITE, state, -1, mp_p->mp_top); + + if (mem == (void *) MAP_FAILED) { + if (errno == ENOMEM) { + SET_POINTER(error_p, KS_STATUS_NO_MEM); + } else { + SET_POINTER(error_p, KS_STATUS_MMAP); + } + return NULL; + } + + mp_p->mp_top += size; + + if (mp_p->mp_addr != NULL) { + mp_p->mp_addr = (char *) mp_p->mp_addr + size; + } + + mp_p->mp_page_c += page_n; + + SET_POINTER(error_p, KS_STATUS_SUCCESS); + return mem; +} + +/* + * static int free_pages + * + * DESCRIPTION: + * + * Free previously allocated pages of memory. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * pages <-> Pointer to memory pages that we are freeing. + * + * size -> Size of the block that we are freeing. + * + * sbrk_b -> Set to one if the pages were allocated with sbrk else mmap. + */ +static int free_pages(void *pages, const unsigned long size) +{ + (void) munmap(pages, size); + return KS_STATUS_SUCCESS; +} + +/* + * static int check_magic + * + * DESCRIPTION: + * + * Check for the existance of the magic ID in a memory pointer. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * addr -> Address inside of the block that we are tryign to locate. + * + * size -> Size of the block. + */ +static int check_magic(const void *addr, const unsigned long size) +{ + const unsigned char *mem_p; + + /* set our starting point */ + mem_p = (unsigned char *) addr + size; + + if (*mem_p == FENCE_MAGIC0 && *(mem_p + 1) == FENCE_MAGIC1) { + return KS_STATUS_SUCCESS; + } else { + return KS_STATUS_PNT_OVER; + } +} + +/* + * static void write_magic + * + * DESCRIPTION: + * + * Write the magic ID to the address. + * + * RETURNS: + * + * None. + * + * ARGUMENTS: + * + * addr -> Address where to write the magic. + */ +static void write_magic(const void *addr) +{ + *(unsigned char *) addr = FENCE_MAGIC0; + *((unsigned char *) addr + 1) = FENCE_MAGIC1; +} + +/* + * static void free_pointer + * + * DESCRIPTION: + * + * Moved a pointer into our free lists. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * addr <-> Address where to write the magic. We may write a next + * pointer to it. + * + * size -> Size of the address space. 
+ */ +static int free_pointer(ks_pool_t *mp_p, void *addr, const unsigned long size) +{ + unsigned int bit_n; + unsigned long real_size; + ks_pool_free_t free_pnt; + +#ifdef DEBUG + (void) printf("freeing a block at %lx of %lu bytes\n", (long) addr, size); +#endif + + if (size == 0) { + return KS_STATUS_SUCCESS; + } + + /* + * if the user size is larger then can fit in an entire block then + * we change the size + */ + if (size > MAX_BLOCK_USER_MEMORY(mp_p)) { + real_size = SIZE_OF_PAGES(mp_p, PAGES_IN_SIZE(mp_p, size)) - sizeof(ks_pool_block_t); + } else { + real_size = size; + } + + /* + * We use a specific free bits calculation here because if we are + * freeing 10 bytes then we will be putting it into the 8-byte free + * list and not the 16 byte list. size_to_bits(10) will return 4 + * instead of 3. + */ + bit_n = size_to_free_bits(real_size); + + /* + * Minimal error checking. We could go all the way through the + * list however this might be prohibitive. + */ + if (mp_p->mp_free[bit_n] == addr) { + return KS_STATUS_IS_FREE; + } + + /* add the freed pointer to the free list */ + if (bit_n < min_bit_free_next) { + /* + * Yes we know this will lose 99% of the allocations but what else + * can we do? No space for a next pointer. + */ + if (mp_p->mp_free[bit_n] == NULL) { + mp_p->mp_free[bit_n] = addr; + } + } else if (bit_n < min_bit_free_size) { + /* we copy, not assign, to maintain the free list */ + memcpy(addr, mp_p->mp_free + bit_n, sizeof(void *)); + mp_p->mp_free[bit_n] = addr; + } else { + + /* setup our free list structure */ + free_pnt.mf_next_p = mp_p->mp_free[bit_n]; + free_pnt.mf_size = real_size; + + /* we copy the structure in since we don't know about alignment */ + memcpy(addr, &free_pnt, sizeof(free_pnt)); + mp_p->mp_free[bit_n] = addr; + } + + return KS_STATUS_SUCCESS; +} + +/* + * static int split_block + * + * DESCRIPTION: + * + * When freeing space in a multi-block chunk we have to create new + * blocks out of the upper areas being freed. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * free_addr -> Address that we are freeing. + * + * size -> Size of the space that we are taking from address. + */ +static int split_block(ks_pool_t *mp_p, void *free_addr, const unsigned long size) +{ + ks_pool_block_t *block_p, *new_block_p; + int ret, page_n; + void *end_p; + + /* + * 1st we find the block pointer from our free addr. At this point + * the pointer must be the 1st one in the block if it is spans + * multiple blocks. + */ + block_p = (ks_pool_block_t *) ((char *) free_addr - sizeof(ks_pool_block_t)); + if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) { + return KS_STATUS_POOL_OVER; + } + + page_n = PAGES_IN_SIZE(mp_p, size); + + /* we are creating a new block structure for the 2nd ... */ + new_block_p = (ks_pool_block_t *) ((char *) block_p + SIZE_OF_PAGES(mp_p, page_n)); + new_block_p->mb_magic = BLOCK_MAGIC; + /* New bounds is 1st block bounds. The 1st block's is reset below. */ + new_block_p->mb_bounds_p = block_p->mb_bounds_p; + /* Continue the linked list. The 1st block will point to us below. 
*/ + new_block_p->mb_next_p = block_p->mb_next_p; + new_block_p->mb_magic2 = BLOCK_MAGIC; + + /* bounds for the 1st block are reset to the 1st page only */ + block_p->mb_bounds_p = (char *) new_block_p; + /* the next block pointer for the 1st block is now the new one */ + block_p->mb_next_p = new_block_p; + + /* only free the space in the 1st block if it is only 1 block in size */ + if (page_n == 1) { + /* now free the rest of the 1st block block */ + end_p = (char *) free_addr + size; + ret = free_pointer(mp_p, end_p, (unsigned long)((char *) block_p->mb_bounds_p - (char *) end_p)); + if (ret != KS_STATUS_SUCCESS) { + return ret; + } + } + + /* now free the rest of the block */ + ret = free_pointer(mp_p, FIRST_ADDR_IN_BLOCK(new_block_p), (unsigned long)MEMORY_IN_BLOCK(new_block_p)); + if (ret != KS_STATUS_SUCCESS) { + return ret; + } + + return KS_STATUS_SUCCESS; +} + +/* + * static void *get_space + * + * DESCRIPTION: + * + * Moved a pointer into our free lists. + * + * RETURNS: + * + * Success - New address that we can use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * byte_size -> Size of the address space that we need. + * + * error_p <- Pointer to ks_status_t which, if not NULL, will be set with + * a ks_pool error code. + */ +static void *get_space(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p) +{ + ks_pool_block_t *block_p; + ks_pool_free_t free_pnt; + int ret; + unsigned long size; + unsigned int bit_c, page_n, left; + void *free_addr = NULL, *free_end; + + size = byte_size; + while ((size & (sizeof(void *) - 1)) > 0) { + size++; + } + + /* + * First we check the free lists looking for something with enough + * pages. Maybe we should only look X bits higher in the list. + * + * XXX: this is where we'd do the best fit. We'd look for the + * closest match. We then could put the rest of the allocation that + * we did not use in a lower free list. Have a define which states + * how deep in the free list to go to find the closest match. + */ + for (bit_c = size_to_bits(size); bit_c <= MAX_BITS; bit_c++) { + if (mp_p->mp_free[bit_c] != NULL) { + free_addr = mp_p->mp_free[bit_c]; + break; + } + } + + /* + * If we haven't allocated any blocks or if the last block doesn't + * have enough memory then we need a new block. + */ + if (bit_c > MAX_BITS) { + + /* we need to allocate more space */ + + page_n = PAGES_IN_SIZE(mp_p, size); + + /* now we try and get the pages we need/want */ + block_p = alloc_pages(mp_p, page_n, error_p); + if (block_p == NULL) { + /* error_p set in alloc_pages */ + return NULL; + } + + /* init the block header */ + block_p->mb_magic = BLOCK_MAGIC; + block_p->mb_bounds_p = (char *) block_p + SIZE_OF_PAGES(mp_p, page_n); + block_p->mb_next_p = mp_p->mp_first_p; + block_p->mb_magic2 = BLOCK_MAGIC; + + /* + * We insert it into the front of the queue. We could add it to + * the end but there is not much use. 
+ */ + mp_p->mp_first_p = block_p; + if (mp_p->mp_last_p == NULL) { + mp_p->mp_last_p = block_p; + } + + free_addr = FIRST_ADDR_IN_BLOCK(block_p); + +#ifdef DEBUG + (void) printf("had to allocate space for %lx of %lu bytes\n", (long) free_addr, size); +#endif + + free_end = (char *) free_addr + size; + left = (unsigned) ((char *) block_p->mb_bounds_p - (char *) free_end); + } else { + + if (bit_c < min_bit_free_next) { + mp_p->mp_free[bit_c] = NULL; + /* calculate the number of left over bytes */ + left = bits_to_size(bit_c) - size; + } else if (bit_c < min_bit_free_next) { + /* grab the next pointer from the freed address into our list */ + memcpy(mp_p->mp_free + bit_c, free_addr, sizeof(void *)); + /* calculate the number of left over bytes */ + left = bits_to_size(bit_c) - size; + } else { + /* grab the free structure from the address */ + memcpy(&free_pnt, free_addr, sizeof(free_pnt)); + mp_p->mp_free[bit_c] = free_pnt.mf_next_p; + + /* are we are splitting up a multiblock chunk into fewer blocks? */ + if (PAGES_IN_SIZE(mp_p, free_pnt.mf_size) > PAGES_IN_SIZE(mp_p, size)) { + ret = split_block(mp_p, free_addr, size); + if (ret != KS_STATUS_SUCCESS) { + SET_POINTER(error_p, ret); + return NULL; + } + /* left over memory was taken care of in split_block */ + left = 0; + } else { + /* calculate the number of left over bytes */ + left = free_pnt.mf_size - size; + } + } + +#ifdef DEBUG + (void) printf("found a free block at %lx of %lu bytes\n", (long) free_addr, left + size); +#endif + + free_end = (char *) free_addr + size; + } + + /* + * If we have memory left over then we free it so someone else can + * use it. We do not free the space if we just allocated a + * multi-block chunk because we need to have every allocation easily + * find the start of the block. Every user address % page-size + * should take us to the start of the block. + */ + if (left > 0 && size <= MAX_BLOCK_USER_MEMORY(mp_p)) { + /* free the rest of the block */ + ret = free_pointer(mp_p, free_end, left); + if (ret != KS_STATUS_SUCCESS) { + SET_POINTER(error_p, ret); + return NULL; + } + } + + /* update our bounds */ + if (free_addr > mp_p->mp_bounds_p) { + mp_p->mp_bounds_p = free_addr; + } else if (free_addr < mp_p->mp_min_p) { + mp_p->mp_min_p = free_addr; + } + + return free_addr; +} + +/* + * static void *alloc_mem + * + * DESCRIPTION: + * + * Allocate space for bytes inside of an already open memory pool. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal malloc. + * + * byte_size -> Number of bytes to allocate in the pool. Must be >0. + * + * error_p <- Pointer to ks_status_t which, if not NULL, will be set with + * a ks_pool error code. 
+ */ +static void *alloc_mem(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p) +{ + unsigned long size, fence; + void *addr; + alloc_prefix_t *prefix; + + /* make sure we have enough bytes */ + if (byte_size < MIN_ALLOCATION) { + size = MIN_ALLOCATION; + } else { + size = byte_size; + } + + fence = FENCE_SIZE; + + /* get our free space + the space for the fence post */ + addr = get_space(mp_p, size + fence + PREFIX_SIZE, error_p); + if (addr == NULL) { + /* error_p set in get_space */ + return NULL; + } + + write_magic((char *) addr + size + PREFIX_SIZE); + prefix = (alloc_prefix_t *) addr; + prefix->m1 = PRE_MAGIC1; + prefix->m2 = PRE_MAGIC2; + prefix->size = size; + + /* maintain our stats */ + mp_p->mp_alloc_c++; + mp_p->mp_user_alloc += size; + if (mp_p->mp_user_alloc > mp_p->mp_max_alloc) { + mp_p->mp_max_alloc = mp_p->mp_user_alloc; + } + + SET_POINTER(error_p, KS_STATUS_SUCCESS); + return (uint8_t *)addr + PREFIX_SIZE; +} + +/* + * static int free_mem + * + * DESCRIPTION: + * + * Free an address from a memory pool. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal free. + * + * addr <-> Address to free. + * + */ +static int free_mem(ks_pool_t *mp_p, void *addr) +{ + unsigned long size, old_size, fence; + int ret; + ks_pool_block_t *block_p; + alloc_prefix_t *prefix; + + + prefix = (alloc_prefix_t *) ((char *) addr - PREFIX_SIZE); + if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) { + return KS_STATUS_INVALID_POINTER; + } + + size = prefix->size; + + /* + * If the size is larger than a block then the allocation must be at + * the front of the block. + */ + if (size > MAX_BLOCK_USER_MEMORY(mp_p)) { + block_p = (ks_pool_block_t *) ((char *) addr - PREFIX_SIZE - sizeof(ks_pool_block_t)); + if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) { + return KS_STATUS_POOL_OVER; + } + } + + /* make sure we have enough bytes */ + if (size < MIN_ALLOCATION) { + old_size = MIN_ALLOCATION; + } else { + old_size = size; + } + + /* find the user's magic numbers */ + ret = check_magic(addr, old_size); + + perform_pool_cleanup_on_free(mp_p, addr); + + /* move pointer to actual beginning */ + addr = prefix; + + if (ret != KS_STATUS_SUCCESS) { + return ret; + } + + fence = FENCE_SIZE; + + /* now we free the pointer */ + ret = free_pointer(mp_p, addr, old_size + fence); + if (ret != KS_STATUS_SUCCESS) { + return ret; + } + mp_p->mp_user_alloc -= old_size; + + /* adjust our stats */ + mp_p->mp_alloc_c--; + + return KS_STATUS_SUCCESS; +} + +/***************************** exported routines *****************************/ + +/* + * ks_pool_t *ks_pool_open + * + * DESCRIPTION: + * + * Open/allocate a new memory pool. + * + * RETURNS: + * + * Success - Pool pointer which must be passed to ks_pool_close to + * deallocate. + * + * Failure - NULL + * + * ARGUMENTS: + * + * flags -> Flags to set attributes of the memory pool. See the top + * of ks_pool.h. + * + * page_size -> Set the internal memory page-size. This must be a + * multiple of the getpagesize() value. Set to 0 for the default. + * + * start_addr -> Starting address to try and allocate memory pools. + * + * error_p <- Pointer to ks_status_t which, if not NULL, will be set with + * a ks_pool error code. 
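A layout note for the allocation produced by alloc_mem() above, as this reading of the code suggests:

/* One allocation, as written by alloc_mem() and checked by free_mem()/check_magic()
 * (sizes not to scale):
 *
 *   | alloc_prefix_t (PRE_MAGIC1, size, PRE_MAGIC2) | user data: size bytes | FENCE_MAGIC0 | FENCE_MAGIC1 |
 *   ^                                               ^
 *   address handed back to free_pointer()           address returned to the caller
 */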
+ */ +static ks_pool_t *ks_pool_raw_open(const unsigned int flags, const unsigned int page_size, void *start_addr, ks_status_t *error_p) +{ + ks_pool_block_t *block_p; + int page_n, ret; + ks_pool_t mp, *mp_p; + void *free_addr; + + if (!enabled_b) { + startup(); + } + + /* zero our temp struct */ + memset(&mp, 0, sizeof(mp)); + + mp.mp_magic = KS_POOL_MAGIC; + mp.mp_flags = flags; + mp.mp_alloc_c = 0; + mp.mp_user_alloc = 0; + mp.mp_max_alloc = 0; + mp.mp_page_c = 0; + /* mp.mp_page_size set below */ + /* mp.mp_blocks_bit_n set below */ + /* mp.mp_top set below */ + /* mp.mp_addr set below */ + mp.mp_log_func = NULL; + mp.mp_min_p = NULL; + mp.mp_bounds_p = NULL; + mp.mp_first_p = NULL; + mp.mp_last_p = NULL; + mp.mp_magic2 = KS_POOL_MAGIC; + + /* get and sanity check our page size */ + if (page_size > 0) { + mp.mp_page_size = page_size; + if (mp.mp_page_size % getpagesize() != 0) { + SET_POINTER(error_p, KS_STATUS_ARG_INVALID); + return NULL; + } + } else { + mp.mp_page_size = getpagesize() * DEFAULT_PAGE_MULT; + if (mp.mp_page_size % 1024 != 0) { + SET_POINTER(error_p, KS_STATUS_PAGE_SIZE); + return NULL; + } + } + + mp.mp_addr = start_addr; + /* we start at the front of the file */ + mp.mp_top = 0; + + + /* + * Find out how many pages we need for our ks_pool structure. + * + * NOTE: this adds possibly unneeded space for ks_pool_block_t which + * may not be in this block. + */ + page_n = PAGES_IN_SIZE(&mp, sizeof(ks_pool_t)); + + /* now allocate us space for the actual struct */ + mp_p = alloc_pages(&mp, page_n, error_p); + if (mp_p == NULL) { + return NULL; + } + + /* + * NOTE: we do not normally free the rest of the block here because + * we want to lesson the chance of an allocation overwriting the + * main structure. + */ + if (BIT_IS_SET(flags, KS_POOL_FLAG_HEAVY_PACKING)) { + + /* we add a block header to the front of the block */ + block_p = (ks_pool_block_t *) mp_p; + + /* init the block header */ + block_p->mb_magic = BLOCK_MAGIC; + block_p->mb_bounds_p = (char *) block_p + SIZE_OF_PAGES(&mp, page_n); + block_p->mb_next_p = NULL; + block_p->mb_magic2 = BLOCK_MAGIC; + + /* the ks_pool pointer is then the 2nd thing in the block */ + mp_p = FIRST_ADDR_IN_BLOCK(block_p); + free_addr = (char *) mp_p + sizeof(ks_pool_t); + + /* free the rest of the block */ + ret = free_pointer(&mp, free_addr, (unsigned long)((char *) block_p->mb_bounds_p - (char *) free_addr)); + if (ret != KS_STATUS_SUCCESS) { + /* NOTE: after this line mp_p will be invalid */ + (void) free_pages(block_p, SIZE_OF_PAGES(&mp, page_n)); + + SET_POINTER(error_p, ret); + return NULL; + } + + /* + * NOTE: if we are HEAVY_PACKING then the 1st block with the ks_pool + * header is not on the block linked list. + */ + + /* now copy our tmp structure into our new memory area */ + memcpy(mp_p, &mp, sizeof(ks_pool_t)); + + /* we setup min/max to our current address which is as good as any */ + mp_p->mp_min_p = block_p; + mp_p->mp_bounds_p = block_p->mb_bounds_p; + } else { + /* now copy our tmp structure into our new memory area */ + memcpy(mp_p, &mp, sizeof(ks_pool_t)); + + /* we setup min/max to our current address which is as good as any */ + mp_p->mp_min_p = mp_p; + mp_p->mp_bounds_p = (char *) mp_p + SIZE_OF_PAGES(mp_p, page_n); + } + + SET_POINTER(error_p, KS_STATUS_SUCCESS); + return mp_p; +} + +/* + * ks_pool_t *ks_pool_open + * + * DESCRIPTION: + * + * Open/allocate a new memory pool. 
+ * + * RETURNS: + * + * Success - KS_SUCCESS + * + * Failure - KS_FAIL + * + * ARGUMENTS: + * + * poolP <- pointer to new pool that will be set on success + * + */ + +KS_DECLARE(ks_status_t) ks_pool_open(ks_pool_t **poolP) +{ + ks_status_t err; + ks_pool_t *pool = ks_pool_raw_open(KS_POOL_FLAG_DEFAULT, 0, NULL, &err); + + if (pool && (err == KS_STATUS_SUCCESS)) { + ks_mutex_create(&pool->mutex, KS_MUTEX_FLAG_DEFAULT, NULL); + ks_mutex_create(&pool->cleanup_mutex, KS_MUTEX_FLAG_DEFAULT, NULL); + *poolP = pool; + return KS_STATUS_SUCCESS; + } else { + *poolP = NULL; + return err; + } +} + +/* + * int ks_pool_raw_close + * + * DESCRIPTION: + * + * Close/free a memory allocation pool previously opened with + * ks_pool_open. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to our memory pool. + */ +static ks_status_t ks_pool_raw_close(ks_pool_t *mp_p) +{ + ks_pool_block_t *block_p, *next_p; + void *addr; + unsigned long size; + int ret, final = KS_STATUS_SUCCESS; + + /* special case, just return no-error */ + if (mp_p == NULL) { + return KS_STATUS_ARG_NULL; + } + if (mp_p->mp_magic != KS_POOL_MAGIC) { + return KS_STATUS_PNT; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + return KS_STATUS_POOL_OVER; + } + + if (mp_p->mp_log_func != NULL) { + mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CLOSE, 0, 0, NULL, NULL, 0); + } + + perform_pool_cleanup(mp_p); + + ks_mutex_t *mutex = mp_p->mutex, *cleanup_mutex = mp_p->cleanup_mutex; + ks_mutex_lock(mutex); + /* + * NOTE: if we are HEAVY_PACKING then the 1st block with the ks_pool + * header is not on the linked list. + */ + + /* free/invalidate the blocks */ + for (block_p = mp_p->mp_first_p; block_p != NULL; block_p = next_p) { + if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) { + final = KS_STATUS_POOL_OVER; + break; + } + block_p->mb_magic = 0; + block_p->mb_magic2 = 0; + /* record the next pointer because it might be invalidated below */ + next_p = block_p->mb_next_p; + ret = free_pages(block_p, (unsigned long)((char *) block_p->mb_bounds_p - (char *) block_p)); + + if (ret != KS_STATUS_SUCCESS) { + final = ret; + } + } + + /* invalidate the ks_pool before we ditch it */ + mp_p->mp_magic = 0; + mp_p->mp_magic2 = 0; + + /* if we are heavy packing then we need to free the 1st block later */ + if (BIT_IS_SET(mp_p->mp_flags, KS_POOL_FLAG_HEAVY_PACKING)) { + addr = (char *) mp_p - sizeof(ks_pool_block_t); + } else { + addr = mp_p; + } + size = SIZE_OF_PAGES(mp_p, PAGES_IN_SIZE(mp_p, sizeof(ks_pool_t))); + + (void) munmap(addr, size); + + ks_mutex_unlock(mutex); + ks_mutex_destroy(&mutex); + ks_mutex_destroy(&cleanup_mutex); + + return final; +} + + +/* + * ks_status_t ks_pool_close + * + * DESCRIPTION: + * + * Close/free a memory allocation pool previously opened with + * ks_pool_open. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_pp <-> Pointer to pointer of our memory pool. + * error_p <- Pointer to error + */ + +KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **mp_pP) +{ + ks_status_t err; + + ks_assert(mp_pP); + + err = ks_pool_raw_close(*mp_pP); + + if (err == KS_STATUS_SUCCESS) { + *mp_pP = NULL; + } + + return err; +} + +/* + * int ks_pool_clear + * + * DESCRIPTION: + * + * Wipe an opened memory pool clean so we can start again. 
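A sketch of the basic open/alloc/close lifecycle using the public routines above:

#include <string.h>
#include <ks.h>

/* Open a pool, take an allocation from it, and tear everything down.
 * Anything not explicitly freed is released when the pool is closed. */
static ks_status_t pool_lifecycle_example(void)
{
	ks_pool_t *pool = NULL;
	ks_status_t status;
	char *buf;

	if ((status = ks_pool_open(&pool)) != KS_STATUS_SUCCESS) {
		return status;
	}

	buf = (char *) ks_pool_alloc(pool, 128);
	if (buf) {
		memcpy(buf, "hello", 6);
		ks_pool_free(pool, buf);   /* optional: ks_pool_close() would reclaim it too */
	}

	return ks_pool_close(&pool);
}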
+ * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - Ks_Pool error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to our memory pool. + */ +KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *mp_p) +{ + ks_pool_block_t *block_p; + int final = KS_STATUS_SUCCESS, bit_n, ret; + void *first_p; + + /* special case, just return no-error */ + if (mp_p == NULL) { + return KS_STATUS_ARG_NULL; + } + if (mp_p->mp_magic != KS_POOL_MAGIC) { + return KS_STATUS_PNT; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + return KS_STATUS_POOL_OVER; + } + + ks_mutex_lock(mp_p->mutex); + if (mp_p->mp_log_func != NULL) { + mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CLEAR, 0, 0, NULL, NULL, 0); + } + + perform_pool_cleanup(mp_p); + + /* reset all of our free lists */ + for (bit_n = 0; bit_n <= MAX_BITS; bit_n++) { + mp_p->mp_free[bit_n] = NULL; + } + + /* free the blocks */ + for (block_p = mp_p->mp_first_p; block_p != NULL; block_p = block_p->mb_next_p) { + if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) { + final = KS_STATUS_POOL_OVER; + break; + } + + first_p = FIRST_ADDR_IN_BLOCK(block_p); + + /* free the memory */ + ret = free_pointer(mp_p, first_p, (unsigned long)MEMORY_IN_BLOCK(block_p)); + if (ret != KS_STATUS_SUCCESS) { + final = ret; + } + } + ks_mutex_unlock(mp_p->mutex); + + return final; +} + +/* + * void *ks_pool_alloc_ex + * + * DESCRIPTION: + * + * Allocate space for bytes inside of an already open memory pool. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * + * byte_size -> Number of bytes to allocate in the pool. Must be >0. + * + * error_p <- Pointer to integer which, if not NULL, will be set with + * a ks_pool error code. + */ +KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p) +{ + void *addr; + + ks_assert(mp_p); + + if (mp_p->mp_magic != KS_POOL_MAGIC) { + SET_POINTER(error_p, KS_STATUS_PNT); + return NULL; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + SET_POINTER(error_p, KS_STATUS_POOL_OVER); + return NULL; + } + + if (byte_size == 0) { + SET_POINTER(error_p, KS_STATUS_ARG_INVALID); + return NULL; + } + + ks_mutex_lock(mp_p->mutex); + addr = alloc_mem(mp_p, byte_size, error_p); + ks_mutex_unlock(mp_p->mutex); + + if (mp_p->mp_log_func != NULL) { + mp_p->mp_log_func(mp_p, KS_POOL_FUNC_ALLOC, byte_size, 0, addr, NULL, 0); + } + + return addr; +} + +/* + * void *ks_pool_alloc + * + * DESCRIPTION: + * + * Allocate space for bytes inside of an already open memory pool. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * + * byte_size -> Number of bytes to allocate in the pool. Must be >0. + * + */ +KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *mp_p, const unsigned long byte_size) +{ + return ks_pool_alloc_ex(mp_p, byte_size, NULL); +} + + +/* + * void *ks_pool_calloc_ex + * + * DESCRIPTION: + * + * Allocate space for elements of bytes in the memory pool and zero + * the space afterwards. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal calloc. + * + * ele_n -> Number of elements to allocate. + * + * ele_size -> Number of bytes per element being allocated. 
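ks_pool_clear() enables a scratch-pool pattern; a small sketch, assuming the pool was opened with ks_pool_open():

#include <ks.h>

/* Reuse one scratch pool across iterations instead of opening and closing each time. */
static void scratch_loop(ks_pool_t *scratch, int iterations)
{
	int i;

	for (i = 0; i < iterations; i++) {
		void *tmp = ks_pool_alloc(scratch, 256);

		if (tmp) {
			/* ... use tmp for this iteration only ... */
		}

		ks_pool_clear(scratch);   /* everything allocated above is released in one shot */
	}
}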
+ * + * error_p <- Pointer to integer which, if not NULL, will be set with + * a ks_pool error code. + */ +KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size, ks_status_t *error_p) +{ + void *addr; + unsigned long byte_size; + + ks_assert(mp_p); + + if (mp_p->mp_magic != KS_POOL_MAGIC) { + SET_POINTER(error_p, KS_STATUS_PNT); + return NULL; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + SET_POINTER(error_p, KS_STATUS_POOL_OVER); + return NULL; + } + + if (ele_n == 0 || ele_size == 0) { + SET_POINTER(error_p, KS_STATUS_ARG_INVALID); + return NULL; + } + + ks_mutex_lock(mp_p->mutex); + byte_size = ele_n * ele_size; + addr = alloc_mem(mp_p, byte_size, error_p); + if (addr != NULL) { + memset(addr, 0, byte_size); + } + ks_mutex_unlock(mp_p->mutex); + + if (mp_p->mp_log_func != NULL) { + mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CALLOC, ele_size, ele_n, addr, NULL, 0); + } + + return addr; +} + +/* + * void *ks_pool_calloc + * + * DESCRIPTION: + * + * Allocate space for elements of bytes in the memory pool and zero + * the space afterwards. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. If NULL then it will do a + * normal calloc. + * + * ele_n -> Number of elements to allocate. + * + * ele_size -> Number of bytes per element being allocated. + * + */ +KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size) +{ + return ks_pool_calloc_ex(mp_p, ele_n, ele_size, NULL); +} + +/* + * int ks_pool_free + * + * DESCRIPTION: + * + * Free an address from a memory pool. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * + * addr <-> Address to free. + * + */ +KS_DECLARE(ks_status_t) ks_pool_free(ks_pool_t *mp_p, void *addr) +{ + ks_status_t r; + + ks_assert(mp_p); + ks_assert(addr); + + ks_mutex_lock(mp_p->mutex); + + if (mp_p->mp_magic != KS_POOL_MAGIC) { + r = KS_STATUS_PNT; + goto end; + } + + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + r = KS_STATUS_POOL_OVER; + goto end; + } + + if (mp_p->mp_log_func != NULL) { + alloc_prefix_t *prefix = (alloc_prefix_t *) ((char *) addr - PREFIX_SIZE); + mp_p->mp_log_func(mp_p, KS_POOL_FUNC_FREE, prefix->size, 0, NULL, addr, 0); + } + + r = free_mem(mp_p, addr); + + end: + + ks_mutex_unlock(mp_p->mutex); + + return r; + +} + +/* + * void *ks_pool_resize_ex + * + * DESCRIPTION: + * + * Reallocate an address in a mmeory pool to a new size. This is + * different from realloc in that it needs the old address' size. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * + * old_addr -> Previously allocated address. + * + * new_byte_size -> New size of the allocation. + * + * error_p <- Pointer to integer which, if not NULL, will be set with + * a ks_pool error code. 
+ */ +KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size, ks_status_t *error_p) +{ + unsigned long copy_size, new_size, old_size, old_byte_size; + void *new_addr; + ks_pool_block_t *block_p; + int ret; + alloc_prefix_t *prefix; + + ks_assert(mp_p); + //ks_assert(old_addr); + + if (!old_addr) { + return ks_pool_alloc_ex(mp_p, new_byte_size, error_p); + } + + if (mp_p->mp_magic != KS_POOL_MAGIC) { + SET_POINTER(error_p, KS_STATUS_PNT); + return NULL; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + SET_POINTER(error_p, KS_STATUS_POOL_OVER); + return NULL; + } + + prefix = (alloc_prefix_t *) ((char *) old_addr - PREFIX_SIZE); + + if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) { + SET_POINTER(error_p, KS_STATUS_INVALID_POINTER); + return NULL; + } + + + ks_mutex_lock(mp_p->mutex); + old_byte_size = prefix->size; + + /* + * If the size is larger than a block then the allocation must be at + * the front of the block. + */ + if (old_byte_size > MAX_BLOCK_USER_MEMORY(mp_p)) { + block_p = (ks_pool_block_t *) ((char *) old_addr - PREFIX_SIZE - sizeof(ks_pool_block_t)); + if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) { + SET_POINTER(error_p, KS_STATUS_POOL_OVER); + new_addr = NULL; + goto end; + } + } + + /* make sure we have enough bytes */ + if (old_byte_size < MIN_ALLOCATION) { + old_size = MIN_ALLOCATION; + } else { + old_size = old_byte_size; + } + + /* verify that the size matches exactly */ + + if (old_size > 0) { + ret = check_magic(old_addr, old_size); + if (ret != KS_STATUS_SUCCESS) { + SET_POINTER(error_p, ret); + new_addr = NULL; + goto end; + } + } + + /* move pointer to actual beginning */ + old_addr = prefix; + + /* make sure we have enough bytes */ + if (new_byte_size < MIN_ALLOCATION) { + new_size = MIN_ALLOCATION; + } else { + new_size = new_byte_size; + } + + /* + * NOTE: we could here see if the size is the same or less and then + * use the current memory and free the space above. This is harder + * than it sounds if we are changing the block size of the + * allocation. + */ + + /* we need to get another address */ + new_addr = alloc_mem(mp_p, new_size, error_p); + if (new_addr == NULL) { + /* error_p set in ks_pool_alloc */ + new_addr = NULL; + goto end; + } + + if (new_byte_size > old_byte_size) { + copy_size = old_byte_size; + } else { + copy_size = new_byte_size; + } + memcpy(new_addr, old_addr, copy_size); + + /* free the old address */ + ret = free_mem(mp_p, (uint8_t *)old_addr + PREFIX_SIZE); + if (ret != KS_STATUS_SUCCESS) { + /* if the old free failed, try and free the new address */ + (void) free_mem(mp_p, new_addr); + SET_POINTER(error_p, ret); + new_addr = NULL; + goto end; + } + + if (mp_p->mp_log_func != NULL) { + mp_p->mp_log_func(mp_p, KS_POOL_FUNC_RESIZE, new_byte_size, 0, new_addr, old_addr, old_byte_size); + } + + SET_POINTER(error_p, KS_STATUS_SUCCESS); + + end: + + ks_mutex_unlock(mp_p->mutex); + + return new_addr; +} + +/* + * void *ks_pool_resize + * + * DESCRIPTION: + * + * Reallocate an address in a mmeory pool to a new size. This is + * different from realloc in that it needs the old address' size. + * + * RETURNS: + * + * Success - Pointer to the address to use. + * + * Failure - NULL + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * + * old_addr -> Previously allocated address. + * + * new_byte_size -> New size of the allocation. 
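A sketch combining ks_pool_calloc() with the resize path above (via the ks_pool_resize() wrapper defined just below):

#include <ks.h>

/* Start with 8 zeroed ints, then grow the same allocation; on success the old
 * contents are copied into the new block, on failure NULL is returned. */
static int *make_and_grow(ks_pool_t *pool)
{
	int *array = (int *) ks_pool_calloc(pool, 8, sizeof(int));

	if (!array) return NULL;
	array[0] = 42;

	return (int *) ks_pool_resize(pool, array, 32 * sizeof(int));
}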
+ * + */ +KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size) +{ + return ks_pool_resize_ex(mp_p, old_addr, new_byte_size, NULL); +} + +/* + * int ks_pool_stats + * + * DESCRIPTION: + * + * Return stats from the memory pool. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p -> Pointer to the memory pool. + * + * page_size_p <- Pointer to an unsigned integer which, if not NULL, + * will be set to the page-size of the pool. + * + * num_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the number of pointers currently allocated in pool. + * + * user_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the number of user bytes allocated in this pool. + * + * max_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the maximum number of user bytes that have been + * allocated in this pool. + * + * tot_alloced_p <- Pointer to an unsigned long which, if not NULL, + * will be set to the total amount of space (including administrative + * overhead) used by the pool. + */ +KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_size_p, + unsigned long *num_alloced_p, unsigned long *user_alloced_p, unsigned long *max_alloced_p, unsigned long *tot_alloced_p) +{ + if (mp_p == NULL) { + return KS_STATUS_ARG_NULL; + } + if (mp_p->mp_magic != KS_POOL_MAGIC) { + return KS_STATUS_PNT; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + return KS_STATUS_POOL_OVER; + } + + SET_POINTER(page_size_p, mp_p->mp_page_size); + SET_POINTER(num_alloced_p, mp_p->mp_alloc_c); + SET_POINTER(user_alloced_p, mp_p->mp_user_alloc); + SET_POINTER(max_alloced_p, mp_p->mp_max_alloc); + SET_POINTER(tot_alloced_p, SIZE_OF_PAGES(mp_p, mp_p->mp_page_c)); + + return KS_STATUS_SUCCESS; +} + +/* + * int ks_pool_set_log_func + * + * DESCRIPTION: + * + * Set a logging callback function to be called whenever there was a + * memory transaction. See ks_pool_log_func_t. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * log_func -> Log function (defined in ks_pool.h) which will be called + * with each ks_pool transaction. + */ +KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *mp_p, ks_pool_log_func_t log_func) +{ + if (mp_p == NULL) { + return KS_STATUS_ARG_NULL; + } + if (mp_p->mp_magic != KS_POOL_MAGIC) { + return KS_STATUS_PNT; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + return KS_STATUS_POOL_OVER; + } + + mp_p->mp_log_func = log_func; + + return KS_STATUS_SUCCESS; +} + +/* + * int ks_pool_set_max_pages + * + * DESCRIPTION: + * + * Set the maximum number of pages that the library will use. Once it + * hits the limit it will return KS_STATUS_NO_PAGES. + * + * NOTE: if the KS_POOL_FLAG_HEAVY_PACKING is set then this max-pages + * value will include the page with the ks_pool header structure in it. + * If the flag is _not_ set then the max-pages will not include this + * first page. + * + * RETURNS: + * + * Success - KS_STATUS_SUCCESS + * + * Failure - ks_status_t error code + * + * ARGUMENTS: + * + * mp_p <-> Pointer to the memory pool. + * + * max_pages -> Maximum number of pages used by the library. 
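+ *
+ * EXAMPLE (illustrative sketch combining this call with ks_pool_stats();
+ * "pool" is assumed to be a valid pool and the numbers are arbitrary):
+ *
+ *   unsigned int page_size;
+ *   unsigned long allocs, user_bytes, max_bytes, total_bytes;
+ *
+ *   ks_pool_set_max_pages(pool, 64);
+ *
+ *   if (ks_pool_stats(pool, &page_size, &allocs, &user_bytes,
+ *                     &max_bytes, &total_bytes) == KS_STATUS_SUCCESS) {
+ *       printf("%lu allocations, %lu user bytes, %lu bytes total\n",
+ *              allocs, user_bytes, total_bytes);
+ *   }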
+ */ +KS_DECLARE(ks_status_t) ks_pool_set_max_pages(ks_pool_t *mp_p, const unsigned int max_pages) +{ + if (mp_p == NULL) { + return KS_STATUS_ARG_NULL; + } + if (mp_p->mp_magic != KS_POOL_MAGIC) { + return KS_STATUS_PNT; + } + if (mp_p->mp_magic2 != KS_POOL_MAGIC) { + return KS_STATUS_POOL_OVER; + } + + if (BIT_IS_SET(mp_p->mp_flags, KS_POOL_FLAG_HEAVY_PACKING)) { + mp_p->mp_max_pages = max_pages; + } else { + /* + * If we are not heavy-packing the pool then we don't count the + * 1st page allocated which holds the ks_pool header structure. + */ + mp_p->mp_max_pages = max_pages + 1; + } + + return KS_STATUS_SUCCESS; +} + +/* + * const char *ks_pool_strerror + * + * DESCRIPTION: + * + * Return the corresponding string for the error number. + * + * RETURNS: + * + * Success - String equivalient of the error. + * + * Failure - String "invalid error code" + * + * ARGUMENTS: + * + * error -> ks_status_t that we are converting. + */ +KS_DECLARE(const char *) ks_pool_strerror(const ks_status_t error) +{ + switch (error) { + case KS_STATUS_SUCCESS: + return "no error"; + break; + case KS_STATUS_ARG_NULL: + return "function argument is null"; + break; + case KS_STATUS_ARG_INVALID: + return "function argument is invalid"; + break; + case KS_STATUS_PNT: + return "invalid ks_pool pointer"; + break; + case KS_STATUS_POOL_OVER: + return "ks_pool structure was overwritten"; + break; + case KS_STATUS_PAGE_SIZE: + return "could not get system page-size"; + break; + case KS_STATUS_OPEN_ZERO: + return "could not open /dev/zero"; + break; + case KS_STATUS_NO_MEM: + return "no memory available"; + break; + case KS_STATUS_MMAP: + return "problems with mmap"; + break; + case KS_STATUS_SIZE: + return "error processing requested size"; + break; + case KS_STATUS_TOO_BIG: + return "allocation exceeds pool max size"; + break; + case KS_STATUS_MEM: + return "invalid memory address"; + break; + case KS_STATUS_MEM_OVER: + return "memory lower bounds overwritten"; + break; + case KS_STATUS_NOT_FOUND: + return "memory block not found in pool"; + break; + case KS_STATUS_IS_FREE: + return "memory address has already been freed"; + break; + case KS_STATUS_BLOCK_STAT: + return "invalid internal block status"; + break; + case KS_STATUS_FREE_ADDR: + return "invalid internal free address"; + break; + case KS_STATUS_NO_PAGES: + return "no available pages left in pool"; + break; + case KS_STATUS_ALLOC: + return "system alloc function failed"; + break; + case KS_STATUS_PNT_OVER: + return "user pointer admin space overwritten"; + break; + case KS_STATUS_INVALID_POINTER: + return "pointer is not valid"; + break; + default: + break; + } + + return "invalid error code"; +} + +KS_DECLARE(char *) ks_pstrdup(ks_pool_t *pool, const char *str) +{ + char *result; + unsigned long len; + + if (!str) { + return NULL; + } + + len = (unsigned long)strlen(str) + 1; + result = ks_pool_alloc(pool, len); + memcpy(result, str, len); + + return result; +} + +KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, size_t len) +{ + char *result; + const char *end; + + if (!str) { + return NULL; + } + + end = memchr(str, '\0', len); + + if (!end) { + len = end - str; + } + + result = ks_pool_alloc(pool, (unsigned long)(len + 1)); + memcpy(result, str, len); + result[len] = '\0'; + + return result; +} + +KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, size_t len) +{ + char *result; + + if (!str) { + return NULL; + } + + result = ks_pool_alloc(pool, (unsigned long)(len + 1)); + memcpy(result, str, len); + result[len] = '\0'; + + 
return result; +} + +KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, size_t len) +{ + void *result; + + if (!buf) { + return NULL; + } + + result = ks_pool_alloc(pool, (unsigned long)len); + memcpy(result, buf, len); + + return result; +} + +KS_DECLARE(char *) ks_pstrcat(ks_pool_t *pool, ...) +{ + char *endp, *argp; + char *result; + size_t lengths[10]; + int i = 0; + size_t len = 0; + va_list ap; + + va_start(ap, pool); + + /* get lengths so we know what to allocate, cache some so we don't have to double strlen those */ + + while ((argp = va_arg(ap, char *))) { + size_t arglen = strlen(argp); + if (i < 10) lengths[i++] = arglen; + len += arglen; + } + + va_end(ap); + + result = (char *) ks_pool_alloc(pool, (unsigned long)(len + 1)); + endp = result; + + va_start(ap, pool); + + i = 0; + + while ((argp = va_arg(ap, char *))) { + len = (i < 10) ? lengths[i++] : strlen(argp); + memcpy(endp, argp, len); + endp += len; + } + + va_end(ap); + + *endp = '\0'; + + return result; +} + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_printf.c b/libs/libks/src/ks_printf.c new file mode 100644 index 0000000000..af62780904 --- /dev/null +++ b/libs/libks/src/ks_printf.c @@ -0,0 +1,980 @@ +/* +** The "printf" code that follows dates from the 1980's. It is in +** the public domain. The original comments are included here for +** completeness. They are very out-of-date but might be useful as +** an historical reference. Most of the "enhancements" have been backed +** out so that the functionality is now the same as standard printf(). +** +************************************************************************** +** +** The following modules is an enhanced replacement for the "printf" subroutines +** found in the standard C library. The following enhancements are +** supported: +** +** + Additional functions. The standard set of "printf" functions +** includes printf, fprintf, sprintf, vprintf, vfprintf, and +** vsprintf. This module adds the following: +** +** * snprintf -- Works like sprintf, but has an extra argument +** which is the size of the buffer written to. +** +** * mprintf -- Similar to sprintf. Writes output to memory +** obtained from malloc. +** +** * xprintf -- Calls a function to dispose of output. +** +** * nprintf -- No output, but returns the number of characters +** that would have been output by printf. +** +** * A v- version (ex: vsnprintf) of every function is also +** supplied. +** +** + A few extensions to the formatting notation are supported: +** +** * The "=" flag (similar to "-") causes the output to be +** be centered in the appropriately sized field. +** +** * The %b field outputs an integer in binary notation. +** +** * The %c field now accepts a precision. The character output +** is repeated by the number of times the precision specifies. +** +** * The %' field works like %c, but takes as its character the +** next character of the format string, instead of the next +** argument. For example, printf("%.78'-") prints 78 minus +** signs, the same as printf("%.78c",'-'). +** +** + When compiled using GCC on a SPARC, this version of printf is +** faster than the library printf for SUN OS 4.1. +** +** + All functions are fully reentrant. 
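+**
+** Illustrative usage of the renamed entry points defined below (a sketch
+** only; "pool" is assumed to be a valid ks_pool_t pointer where one is
+** required):
+**
+**       char buf[128];
+**       char *a = ks_mprintf("%s-%05d", "call", 42);    -- heap memory, released with free()
+**       char *b = ks_pprintf(pool, "%u bytes", 1024u);  -- pool memory, released with ks_pool_free()
+**       ks_snprintfv(buf, sizeof(buf), "%g", 3.14159);  -- fixed buffer, output is truncated to fit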
+** +*/ +/* + * 20090210 (stkn): + * Taken from sqlite-3.3.x, + * renamed SQLITE_ -> KS_, + * renamed visible functions to ks_* + * disabled functions without extra conversion specifiers + */ + +#include + +#define LONGDOUBLE_TYPE long double + +/* +** Conversion types fall into various categories as defined by the +** following enumeration. +*/ +#define etRADIX 1 /* Integer types. %d, %x, %o, and so forth */ +#define etFLOAT 2 /* Floating point. %f */ +#define etEXP 3 /* Exponentional notation. %e and %E */ +#define etGENERIC 4 /* Floating or exponential, depending on exponent. %g */ +#define etSIZE 5 /* Return number of characters processed so far. %n */ +#define etSTRING 6 /* Strings. %s */ +#define etDYNSTRING 7 /* Dynamically allocated strings. %z */ +#define etPERCENT 8 /* Percent symbol. %% */ +#define etCHARX 9 /* Characters. %c */ +/* The rest are extensions, not normally found in printf() */ +#define etCHARLIT 10 /* Literal characters. %' */ +#define etSQLESCAPE 11 /* Strings with '\'' doubled. %q */ +#define etSQLESCAPE2 12 /* Strings with '\'' doubled and enclosed in '', + NULL pointers replaced by SQL NULL. %Q */ +#ifdef __UNSUPPORTED__ +#define etTOKEN 13 /* a pointer to a Token structure */ +#define etSRCLIST 14 /* a pointer to a SrcList */ +#endif +#define etPOINTER 15 /* The %p conversion */ +#define etSQLESCAPE3 16 +#define etSQLESCAPE4 17 + +/* +** An "etByte" is an 8-bit unsigned value. +*/ +typedef unsigned char etByte; + +/* +** Each builtin conversion character (ex: the 'd' in "%d") is described +** by an instance of the following structure +*/ +typedef struct et_info { /* Information about each format field */ + char fmttype; /* The format field code letter */ + etByte base; /* The base for radix conversion */ + etByte flags; /* One or more of FLAG_ constants below */ + etByte type; /* Conversion paradigm */ + etByte charset; /* Offset into aDigits[] of the digits string */ + etByte prefix; /* Offset into aPrefix[] of the prefix string */ +} et_info; + +/* +** Allowed values for et_info.flags +*/ +#define FLAG_SIGNED 1 /* True if the value to convert is signed */ +#define FLAG_INTERN 2 /* True if for internal use only */ +#define FLAG_STRING 4 /* Allow infinity precision */ + + +/* +** The following table is searched linearly, so it is good to put the +** most frequently used conversion types first. +*/ +static const char aDigits[] = "0123456789ABCDEF0123456789abcdef"; +static const char aPrefix[] = "-x0\000X0"; +static const et_info fmtinfo[] = { + {'d', 10, 1, etRADIX, 0, 0}, + {'s', 0, 4, etSTRING, 0, 0}, + {'g', 0, 1, etGENERIC, 30, 0}, + {'z', 0, 6, etDYNSTRING, 0, 0}, + {'q', 0, 4, etSQLESCAPE, 0, 0}, + {'Q', 0, 4, etSQLESCAPE2, 0, 0}, + {'w', 0, 4, etSQLESCAPE3, 0, 0}, + {'y', 0, 4, etSQLESCAPE4, 0, 0}, + {'c', 0, 0, etCHARX, 0, 0}, + {'o', 8, 0, etRADIX, 0, 2}, + {'u', 10, 0, etRADIX, 0, 0}, + {'x', 16, 0, etRADIX, 16, 1}, + {'X', 16, 0, etRADIX, 0, 4}, +#ifndef KS_OMIT_FLOATING_POINT + {'f', 0, 1, etFLOAT, 0, 0}, + {'e', 0, 1, etEXP, 30, 0}, + {'E', 0, 1, etEXP, 14, 0}, + {'G', 0, 1, etGENERIC, 14, 0}, +#endif + {'i', 10, 1, etRADIX, 0, 0}, + {'n', 0, 0, etSIZE, 0, 0}, + {'%', 0, 0, etPERCENT, 0, 0}, + {'p', 16, 0, etPOINTER, 0, 1}, +#ifdef __UNSUPPORTED__ + {'T', 0, 2, etTOKEN, 0, 0}, + {'S', 0, 2, etSRCLIST, 0, 0}, +#endif +}; + +#define etNINFO (sizeof(fmtinfo)/sizeof(fmtinfo[0])) + +/* +** If KS_OMIT_FLOATING_POINT is defined, then none of the floating point +** conversions will work. 
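+** For example, a build configured with -DKS_OMIT_FLOATING_POINT keeps the
+** integer and string conversions but drops the %f, %e, %E and %G entries
+** from fmtinfo[] above and compiles out the floating-point formatting code
+** below.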
+*/ +#ifndef KS_OMIT_FLOATING_POINT +/* +** "*val" is a double such that 0.1 <= *val < 10.0 +** Return the ascii code for the leading digit of *val, then +** multiply "*val" by 10.0 to renormalize. +** +** Example: +** input: *val = 3.14159 +** output: *val = 1.4159 function return = '3' +** +** The counter *cnt is incremented each time. After counter exceeds +** 16 (the number of significant digits in a 64-bit float) '0' is +** always returned. +*/ +static int et_getdigit(LONGDOUBLE_TYPE * val, int *cnt) +{ + int digit; + LONGDOUBLE_TYPE d; + if ((*cnt)++ >= 16) + return '0'; + digit = (int) *val; + d = digit; + digit += '0'; + *val = (*val - d) * 10.0; + return digit; +} +#endif /* KS_OMIT_FLOATING_POINT */ + +/* +** On machines with a small stack size, you can redefine the +** KS_PRINT_BUF_SIZE to be less than 350. But beware - for +** smaller values some %f conversions may go into an infinite loop. +*/ +#ifndef KS_PRINT_BUF_SIZE +# define KS_PRINT_BUF_SIZE 350 +#endif +#define etBUFSIZE KS_PRINT_BUF_SIZE /* Size of the output buffer */ + +/* +** The root program. All variations call this core. +** +** INPUTS: +** func This is a pointer to a function taking three arguments +** 1. A pointer to anything. Same as the "arg" parameter. +** 2. A pointer to the list of characters to be output +** (Note, this list is NOT null terminated.) +** 3. An integer number of characters to be output. +** (Note: This number might be zero.) +** +** arg This is the pointer to anything which will be passed as the +** first argument to "func". Use it for whatever you like. +** +** fmt This is the format string, as in the usual print. +** +** ap This is a pointer to a list of arguments. Same as in +** vfprint. +** +** OUTPUTS: +** The return value is the total number of characters sent to +** the function "func". Returns -1 on a error. +** +** Note that the order in which automatic variables are declared below +** seems to make a big difference in determining how fast this beast +** will run. +*/ +static int vxprintf(void (*func) (void *, const char *, int), /* Consumer of text */ + void *arg, /* First argument to the consumer */ + int useExtended, /* Allow extended %-conversions */ + const char *fmt, /* Format string */ + va_list ap /* arguments */ + ) +{ + int c; /* Next character in the format string */ + char *bufpt; /* Pointer to the conversion buffer */ + int precision; /* Precision of the current field */ + int length; /* Length of the field */ + int idx; /* A general purpose loop counter */ + int count; /* Total number of characters output */ + int width; /* Width of the current field */ + etByte flag_leftjustify; /* True if "-" flag is present */ + etByte flag_plussign; /* True if "+" flag is present */ + etByte flag_blanksign; /* True if " " flag is present */ + etByte flag_alternateform; /* True if "#" flag is present */ + etByte flag_altform2; /* True if "!" flag is present */ + etByte flag_zeropad; /* True if field width constant starts with zero */ + etByte flag_long; /* True if "l" flag is present */ + etByte flag_longlong; /* True if the "ll" flag is present */ + etByte done; /* Loop termination flag */ + uint64_t longvalue; /* Value for integer types */ + LONGDOUBLE_TYPE realvalue; /* Value for real types */ + const et_info *infop; /* Pointer to the appropriate info structure */ + char buf[etBUFSIZE]; /* Conversion buffer */ + char prefix; /* Prefix character. "+" or "-" or " " or '\0'. 
*/ + etByte errorflag = 0; /* True if an error is encountered */ + etByte xtype = 0; /* Conversion paradigm */ + char *zExtra; /* Extra memory used for etTCLESCAPE conversions */ + static const char spaces[] = " "; +#define etSPACESIZE (sizeof(spaces)-1) +#ifndef KS_OMIT_FLOATING_POINT + int exp, e2; /* exponent of real numbers */ + double rounder; /* Used for rounding floating point values */ + etByte flag_dp; /* True if decimal point should be shown */ + etByte flag_rtz; /* True if trailing zeros should be removed */ + etByte flag_exp; /* True to force display of the exponent */ + int nsd; /* Number of significant digits returned */ +#endif + + func(arg, "", 0); + count = length = 0; + bufpt = 0; + for (; (c = (*fmt)) != 0; ++fmt) { + if (c != '%') { + int amt; + bufpt = (char *) fmt; + amt = 1; + while ((c = (*++fmt)) != '%' && c != 0) + amt++; + (*func) (arg, bufpt, amt); + count += amt; + if (c == 0) + break; + } + if ((c = (*++fmt)) == 0) { + errorflag = 1; + (*func) (arg, "%", 1); + count++; + break; + } + /* Find out what flags are present */ + flag_leftjustify = flag_plussign = flag_blanksign = flag_alternateform = flag_altform2 = flag_zeropad = 0; + done = 0; + do { + switch (c) { + case '-': + flag_leftjustify = 1; + break; + case '+': + flag_plussign = 1; + break; + case ' ': + flag_blanksign = 1; + break; + case '#': + flag_alternateform = 1; + break; + case '!': + flag_altform2 = 1; + break; + case '0': + flag_zeropad = 1; + break; + default: + done = 1; + break; + } + } while (!done && (c = (*++fmt)) != 0); + /* Get the field width */ + width = 0; + if (c == '*') { + width = va_arg(ap, int); + if (width < 0) { + flag_leftjustify = 1; + width = -width; + } + c = *++fmt; + } else { + while (c >= '0' && c <= '9') { + width = width * 10 + c - '0'; + c = *++fmt; + } + } + if (width > etBUFSIZE - 10) { + width = etBUFSIZE - 10; + } + /* Get the precision */ + if (c == '.') { + precision = 0; + c = *++fmt; + if (c == '*') { + precision = va_arg(ap, int); + if (precision < 0) + precision = -precision; + c = *++fmt; + } else { + while (c >= '0' && c <= '9') { + precision = precision * 10 + c - '0'; + c = *++fmt; + } + } + } else { + precision = -1; + } + /* Get the conversion type modifier */ + if (c == 'l') { + flag_long = 1; + c = *++fmt; + if (c == 'l') { + flag_longlong = 1; + c = *++fmt; + } else { + flag_longlong = 0; + } + } else { + flag_long = flag_longlong = 0; + } + /* Fetch the info entry for the field */ + infop = 0; + for (idx = 0; idx < etNINFO; idx++) { + if (c == fmtinfo[idx].fmttype) { + infop = &fmtinfo[idx]; + if (useExtended || (infop->flags & FLAG_INTERN) == 0) { + xtype = infop->type; + } else { + return -1; + } + break; + } + } + zExtra = 0; + if (infop == 0) { + return -1; + } + + + /* Limit the precision to prevent overflowing buf[] during conversion */ + if (precision > etBUFSIZE - 40 && (infop->flags & FLAG_STRING) == 0) { + precision = etBUFSIZE - 40; + } + + /* + ** At this point, variables are initialized as follows: + ** + ** flag_alternateform TRUE if a '#' is present. + ** flag_altform2 TRUE if a '!' is present. + ** flag_plussign TRUE if a '+' is present. + ** flag_leftjustify TRUE if a '-' is present or if the + ** field width was negative. + ** flag_zeropad TRUE if the width began with 0. + ** flag_long TRUE if the letter 'l' (ell) prefixed + ** the conversion character. + ** flag_longlong TRUE if the letter 'll' (ell ell) prefixed + ** the conversion character. + ** flag_blanksign TRUE if a ' ' is present. + ** width The specified field width. 
This is + ** always non-negative. Zero is the default. + ** precision The specified precision. The default + ** is -1. + ** xtype The class of the conversion. + ** infop Pointer to the appropriate info struct. + */ + switch (xtype) { + case etPOINTER: + flag_longlong = sizeof(char *) == sizeof(int64_t); + flag_long = sizeof(char *) == sizeof(long int); + /* Fall through into the next case */ + case etRADIX: + if (infop->flags & FLAG_SIGNED) { + int64_t v; + if (flag_longlong) + v = va_arg(ap, int64_t); + else if (flag_long) + v = va_arg(ap, long int); + else + v = va_arg(ap, int); + if (v < 0) { + longvalue = -v; + prefix = '-'; + } else { + longvalue = v; + if (flag_plussign) + prefix = '+'; + else if (flag_blanksign) + prefix = ' '; + else + prefix = 0; + } + } else { + if (flag_longlong) + longvalue = va_arg(ap, uint64_t); + else if (flag_long) + longvalue = va_arg(ap, unsigned long int); + else + longvalue = va_arg(ap, unsigned int); + prefix = 0; + } + if (longvalue == 0) + flag_alternateform = 0; + if (flag_zeropad && precision < width - (prefix != 0)) { + precision = width - (prefix != 0); + } + bufpt = &buf[etBUFSIZE - 1]; + { + register const char *cset; /* Use registers for speed */ + register int base; + cset = &aDigits[infop->charset]; + base = infop->base; + do { /* Convert to ascii */ + *(--bufpt) = cset[longvalue % base]; + longvalue = longvalue / base; + } while (longvalue > 0); + } + length = (int)(&buf[etBUFSIZE - 1] - bufpt); + for (idx = precision - length; idx > 0; idx--) { + *(--bufpt) = '0'; /* Zero pad */ + } + if (prefix) + *(--bufpt) = prefix; /* Add sign */ + if (flag_alternateform && infop->prefix) { /* Add "0" or "0x" */ + const char *pre; + char x; + pre = &aPrefix[infop->prefix]; + if (*bufpt != pre[0]) { + for (; (x = (*pre)) != 0; pre++) + *(--bufpt) = x; + } + } + length = (int)(&buf[etBUFSIZE - 1] - bufpt); + break; + case etFLOAT: + case etEXP: + case etGENERIC: + realvalue = va_arg(ap, double); +#ifndef KS_OMIT_FLOATING_POINT + if (precision < 0) + precision = 6; /* Set default precision */ + if (precision > etBUFSIZE / 2 - 10) + precision = etBUFSIZE / 2 - 10; + if (realvalue < 0.0) { + realvalue = -realvalue; + prefix = '-'; + } else { + if (flag_plussign) + prefix = '+'; + else if (flag_blanksign) + prefix = ' '; + else + prefix = 0; + } + if (xtype == etGENERIC && precision > 0) + precision--; +#if 0 + /* Rounding works like BSD when the constant 0.4999 is used. Wierd! */ + for (idx = precision, rounder = 0.4999; idx > 0; idx--, rounder *= 0.1); +#else + /* It makes more sense to use 0.5 */ + for (idx = precision, rounder = 0.5; idx > 0; idx--, rounder *= 0.1) { + } +#endif + if (xtype == etFLOAT) + realvalue += rounder; + /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ + exp = 0; + if (realvalue > 0.0) { + while (realvalue >= 1e32 && exp <= 350) { + realvalue *= 1e-32; + exp += 32; + } + while (realvalue >= 1e8 && exp <= 350) { + realvalue *= 1e-8; + exp += 8; + } + while (realvalue >= 10.0 && exp <= 350) { + realvalue *= 0.1; + exp++; + } + while (realvalue < 1e-8 && exp >= -350) { + realvalue *= 1e8; + exp -= 8; + } + while (realvalue < 1.0 && exp >= -350) { + realvalue *= 10.0; + exp--; + } + if (exp > 350 || exp < -350) { + bufpt = "NaN"; + length = 3; + break; + } + } + bufpt = buf; + /* + ** If the field type is etGENERIC, then convert to either etEXP + ** or etFLOAT, as appropriate. 
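+ ** For example, with the default precision of 6 (reduced to 5 for %g
+ ** above), 1234567.0 has a normalized exponent of 6 and is printed in
+ ** %e form, while 0.0001234 has an exponent of -4 and stays in %f form,
+ ** since the switch to etEXP only happens when exp < -4 or exp > precision.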
+ */ + flag_exp = xtype == etEXP; + if (xtype != etFLOAT) { + realvalue += rounder; + if (realvalue >= 10.0) { + realvalue *= 0.1; + exp++; + } + } + if (xtype == etGENERIC) { + flag_rtz = !flag_alternateform; + if (exp < -4 || exp > precision) { + xtype = etEXP; + } else { + precision = precision - exp; + xtype = etFLOAT; + } + } else { + flag_rtz = 0; + } + if (xtype == etEXP) { + e2 = 0; + } else { + e2 = exp; + } + nsd = 0; + flag_dp = (precision > 0) | flag_alternateform | flag_altform2; + /* The sign in front of the number */ + if (prefix) { + *(bufpt++) = prefix; + } + /* Digits prior to the decimal point */ + if (e2 < 0) { + *(bufpt++) = '0'; + } else { + for (; e2 >= 0; e2--) { + *(bufpt++) = (char) et_getdigit(&realvalue, &nsd); + } + } + /* The decimal point */ + if (flag_dp) { + *(bufpt++) = '.'; + } + /* "0" digits after the decimal point but before the first + ** significant digit of the number */ + for (e2++; e2 < 0 && precision > 0; precision--, e2++) { + *(bufpt++) = '0'; + } + /* Significant digits after the decimal point */ + while ((precision--) > 0) { + *(bufpt++) = (char) et_getdigit(&realvalue, &nsd); + } + /* Remove trailing zeros and the "." if no digits follow the "." */ + if (flag_rtz && flag_dp) { + while (bufpt[-1] == '0') + *(--bufpt) = 0; + assert(bufpt > buf); + if (bufpt[-1] == '.') { + if (flag_altform2) { + *(bufpt++) = '0'; + } else { + *(--bufpt) = 0; + } + } + } + /* Add the "eNNN" suffix */ + if (flag_exp || (xtype == etEXP && exp)) { + *(bufpt++) = aDigits[infop->charset]; + if (exp < 0) { + *(bufpt++) = '-'; + exp = -exp; + } else { + *(bufpt++) = '+'; + } + if (exp >= 100) { + *(bufpt++) = (char) (exp / 100) + '0'; /* 100's digit */ + exp %= 100; + } + *(bufpt++) = (char) exp / 10 + '0'; /* 10's digit */ + *(bufpt++) = exp % 10 + '0'; /* 1's digit */ + } + *bufpt = 0; + + /* The converted number is in buf[] and zero terminated. Output it. + ** Note that the number is in the usual order, not reversed as with + ** integer conversions. */ + length = (int)(bufpt - buf); + bufpt = buf; + + /* Special case: Add leading zeros if the flag_zeropad flag is + ** set and we are not left justified */ + if (flag_zeropad && !flag_leftjustify && length < width) { + int i; + int nPad = width - length; + for (i = width; i >= nPad; i--) { + bufpt[i] = bufpt[i - nPad]; + } + i = prefix != 0; + while (nPad--) + bufpt[i++] = '0'; + length = width; + } +#endif + break; + case etSIZE: + *(va_arg(ap, int *)) = count; + length = width = 0; + break; + case etPERCENT: + buf[0] = '%'; + bufpt = buf; + length = 1; + break; + case etCHARLIT: + case etCHARX: + c = buf[0] = (char) (xtype == etCHARX ? va_arg(ap, int) : *++fmt); + if (precision >= 0) { + for (idx = 1; idx < precision; idx++) + buf[idx] = (char) c; + length = precision; + } else { + length = 1; + } + bufpt = buf; + break; + case etSTRING: + case etDYNSTRING: + bufpt = va_arg(ap, char *); + if (bufpt == 0) { + bufpt = ""; + } else if (xtype == etDYNSTRING) { + zExtra = bufpt; + } + length = (int)strlen(bufpt); + if (precision >= 0 && precision < length) + length = precision; + break; + case etSQLESCAPE: + case etSQLESCAPE2: + case etSQLESCAPE4: + case etSQLESCAPE3:{ + int i, j, n, ch, isnull; + int needQuote; + char *escarg = va_arg(ap, char *); + isnull = escarg == 0; + if (isnull) + escarg = (xtype == etSQLESCAPE2 ? 
"NULL" : "(NULL)"); + for (i = n = 0; (ch = escarg[i]) != 0; i++) { + if (ch == '\'' || (xtype == etSQLESCAPE3 && ch == '\\')) + n++; + } + needQuote = !isnull && xtype == etSQLESCAPE2; + n += i + 1 + needQuote * 2; + if (n > etBUFSIZE) { + bufpt = zExtra = malloc(n); + if (bufpt == 0) + return -1; + } else { + bufpt = buf; + } + j = 0; + if (needQuote) + bufpt[j++] = '\''; + for (i = 0; (ch = escarg[i]) != 0; i++) { + bufpt[j++] = (char) ch; + if (xtype == etSQLESCAPE4) { + if (ch == '\'' || (xtype == etSQLESCAPE3 && ch == '\\')) { + bufpt[j] = (char) ch; + bufpt[j-1] = (char) '\\'; + j++; + } + } else { + if (ch == '\'' || (xtype == etSQLESCAPE3 && ch == '\\')) + bufpt[j++] = (char) ch; + } + } + if (needQuote) + bufpt[j++] = '\''; + bufpt[j] = 0; + length = j; + /* The precision is ignored on %q and %Q */ + /* if ( precision>=0 && precisionz) { + (*func) (arg, (char *) pToken->z, pToken->n); + } + length = width = 0; + break; + } + case etSRCLIST:{ + SrcList *pSrc = va_arg(ap, SrcList *); + int k = va_arg(ap, int); + struct SrcList_item *pItem = &pSrc->a[k]; + assert(k >= 0 && k < pSrc->nSrc); + if (pItem->zDatabase && pItem->zDatabase[0]) { + (*func) (arg, pItem->zDatabase, strlen(pItem->zDatabase)); + (*func) (arg, ".", 1); + } + (*func) (arg, pItem->zName, strlen(pItem->zName)); + length = width = 0; + break; + } +#endif + } /* End switch over the format type */ + /* + ** The text of the conversion is pointed to by "bufpt" and is + ** "length" characters long. The field width is "width". Do + ** the output. + */ + if (!flag_leftjustify) { + register int nspace; + nspace = width - length; + if (nspace > 0) { + count += nspace; + while (nspace >= etSPACESIZE) { + (*func) (arg, spaces, etSPACESIZE); + nspace -= etSPACESIZE; + } + if (nspace > 0) + (*func) (arg, spaces, nspace); + } + } + if (length > 0) { + (*func) (arg, bufpt, length); + count += length; + } + if (flag_leftjustify) { + register int nspace; + nspace = width - length; + if (nspace > 0) { + count += nspace; + while (nspace >= etSPACESIZE) { + (*func) (arg, spaces, etSPACESIZE); + nspace -= etSPACESIZE; + } + if (nspace > 0) + (*func) (arg, spaces, nspace); + } + } + if (zExtra) { + free(zExtra); + } + } /* End for loop over the format string */ + return errorflag ? -1 : count; +} /* End of function */ + + +/* This structure is used to store state information about the +** write to memory that is currently in progress. +*/ +struct sgMprintf { + char *zBase; /* A base allocation */ + char *zText; /* The string collected so far */ + int nChar; /* Length of the string so far */ + int nTotal; /* Output size if unconstrained */ + int nAlloc; /* Amount of space allocated in zText */ + void *arg; /* Third arg to the realloc callback */ + void *(*xRealloc) (void *, int, void *); /* Function used to realloc memory */ +}; + +/* +** This function implements the callback from vxprintf. +** +** This routine add nNewChar characters of text in zNewText to +** the sgMprintf structure pointed to by "arg". 
+*/ +static void mout(void *arg, const char *zNewText, int nNewChar) +{ + struct sgMprintf *pM = (struct sgMprintf *) arg; + pM->nTotal += nNewChar; + if (pM->nChar + nNewChar + 1 > pM->nAlloc) { + if (pM->xRealloc == 0) { + nNewChar = pM->nAlloc - pM->nChar - 1; + } else { + pM->nAlloc = pM->nChar + nNewChar * 2 + 1; + if (pM->zText == pM->zBase) { + pM->zText = pM->xRealloc(0, pM->nAlloc, pM->arg); + if (pM->zText && pM->nChar) { + memcpy(pM->zText, pM->zBase, pM->nChar); + } + } else { + char *zNew; + zNew = pM->xRealloc(pM->zText, pM->nAlloc, pM->arg); + if (zNew) { + pM->zText = zNew; + } + } + } + } + if (pM->zText) { + if (nNewChar > 0) { + memcpy(&pM->zText[pM->nChar], zNewText, nNewChar); + pM->nChar += nNewChar; + } + pM->zText[pM->nChar] = 0; + } +} + +/* +** This routine is a wrapper around xprintf() that invokes mout() as +** the consumer. +*/ +static char *base_vprintf(void *(*xRealloc) (void *, int, void *), /* Routine to realloc memory. May be NULL */ + int useInternal, /* Use internal %-conversions if true */ + char *zInitBuf, /* Initially write here, before mallocing */ + int nInitBuf, /* Size of zInitBuf[] */ + const char *zFormat, /* format string */ + va_list ap, /* arguments */ + void *realloc_arg /*arg to pass to realloc function*/ + ) +{ + struct sgMprintf sM; + sM.zBase = sM.zText = zInitBuf; + sM.nChar = sM.nTotal = 0; + sM.nAlloc = nInitBuf; + sM.xRealloc = xRealloc; + sM.arg = realloc_arg; + vxprintf(mout, &sM, useInternal, zFormat, ap); + if (xRealloc) { + if (sM.zText == sM.zBase) { + sM.zText = xRealloc(0, sM.nChar + 1, realloc_arg); + if (sM.zText) { + memcpy(sM.zText, sM.zBase, sM.nChar + 1); + } + } else if (sM.nAlloc > sM.nChar + 10) { + char *zNew = xRealloc(sM.zText, sM.nChar + 1, realloc_arg); + if (zNew) { + sM.zText = zNew; + } + } + } + return sM.zText; +} + +/* +** Realloc that is a real function, not a macro. +*/ +static void *printf_realloc(void *old, int size, void *arg) +{ + return realloc(old, size); +} + +/* +** Print into memory. Omit the internal %-conversion extensions. +*/ +KS_DECLARE(char *) ks_vmprintf(const char *zFormat, va_list ap) +{ + char zBase[KS_PRINT_BUF_SIZE]; + return base_vprintf(printf_realloc, 0, zBase, sizeof(zBase), zFormat, ap, NULL); +} + +/* +** Print into memory. Omit the internal %-conversion extensions. +*/ +KS_DECLARE(char *) ks_mprintf(const char *zFormat, ...) +{ + va_list ap; + char *z; + char zBase[KS_PRINT_BUF_SIZE]; + va_start(ap, zFormat); + z = base_vprintf(printf_realloc, 0, zBase, sizeof(zBase), zFormat, ap, NULL); + va_end(ap); + return z; +} + +/* +** pool_realloc function +*/ +static void *pool_realloc(void *old, int size, void *arg) +{ + return ks_pool_resize(arg, old, size); +} + +/* +** Print into pool memory. Omit the internal %-conversion extensions. +*/ +KS_DECLARE(char *) ks_vpprintf(ks_pool_t *pool, const char *zFormat, va_list ap) +{ + char zBase[KS_PRINT_BUF_SIZE]; + return base_vprintf(pool_realloc, 0, zBase, sizeof(zBase), zFormat, ap, pool); +} + +/* +** Print into pool memory. Omit the internal %-conversion extensions. +*/ +KS_DECLARE(char *) ks_pprintf(ks_pool_t *pool, const char *zFormat, ...) +{ + va_list ap; + char *z; + char zBase[KS_PRINT_BUF_SIZE]; + va_start(ap, zFormat); + z = base_vprintf(pool_realloc, 0, zBase, sizeof(zBase), zFormat, ap, pool); + va_end(ap); + return z; +} + +/* +** ks_vsnprintf() works like vsnprintf() except that it ignores the +** current locale settings. 
This is important for SQLite because we +** are not able to use a "," as the decimal point in place of "." as +** specified by some locales. +*/ +KS_DECLARE(char *) ks_vsnprintfv(char *zBuf, int n, const char *zFormat, va_list ap) +{ + return base_vprintf(0, 0, zBuf, n, zFormat, ap, NULL); +} + +/* +** ks_snprintf() works like snprintf() except that it ignores the +** current locale settings. This is important for SQLite because we +** are not able to use a "," as the decimal point in place of "." as +** specified by some locales. +*/ + +KS_DECLARE(char *) ks_snprintfv(char *zBuf, int n, const char *zFormat, ...) +{ + char *z; + va_list ap; + + va_start(ap, zFormat); + z = base_vprintf(0, 0, zBuf, n, zFormat, ap, NULL); + va_end(ap); + return z; +} + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_q.c b/libs/libks/src/ks_q.c new file mode 100644 index 0000000000..f8ab627213 --- /dev/null +++ b/libs/libks/src/ks_q.c @@ -0,0 +1,473 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +typedef struct ks_qnode_s { + void *ptr; + struct ks_qnode_s *next; + struct ks_qnode_s *prev; +} ks_qnode_t; + +struct ks_q_s { + ks_pool_t *pool; + ks_flush_fn_t flush_fn; + void *flush_data; + ks_size_t len; + ks_size_t maxlen; + ks_cond_t *pop_cond; + ks_cond_t *push_cond; + ks_mutex_t *list_mutex; + uint32_t pushers; + uint32_t poppers; + struct ks_qnode_s *head; + struct ks_qnode_s *tail; + struct ks_qnode_s *empty; + uint8_t active; +}; + +static void ks_q_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ + ks_q_t *q = (ks_q_t *) ptr; + ks_qnode_t *np, *fp; + + if (ctype == KS_MPCL_GLOBAL_FREE) { + return; + } + + switch(action) { + case KS_MPCL_ANNOUNCE: + break; + case KS_MPCL_TEARDOWN: + np = q->head; + while(np) { + fp = np; + np = np->next; + ks_pool_free(q->pool, fp); + } + + np = q->empty; + while(np) { + fp = np; + np = np->next; + ks_pool_free(q->pool, fp); + } + break; + case KS_MPCL_DESTROY: + ks_cond_destroy(&q->pop_cond); + ks_cond_destroy(&q->push_cond); + ks_mutex_destroy(&q->list_mutex); + break; + } +} + +KS_DECLARE(ks_status_t) ks_q_flush(ks_q_t *q) +{ + void *ptr; + + if (!q->active) return KS_STATUS_INACTIVE; + if (!q->flush_fn) return KS_STATUS_FAIL; + + while(ks_q_trypop(q, &ptr) == KS_STATUS_SUCCESS) { + q->flush_fn(q, ptr, q->flush_data); + } + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_q_set_flush_fn(ks_q_t *q, ks_flush_fn_t fn, void *flush_data) +{ + if (!q->active) return KS_STATUS_INACTIVE; + + q->flush_fn = fn; + q->flush_data = flush_data; + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_q_wake(ks_q_t *q) +{ + ks_mutex_lock(q->list_mutex); + ks_cond_broadcast(q->push_cond); + ks_cond_broadcast(q->pop_cond); + ks_mutex_unlock(q->list_mutex); + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_size_t) ks_q_term(ks_q_t *q) +{ + int active; + + ks_mutex_lock(q->list_mutex); + active = q->active; + q->active = 0; + ks_mutex_unlock(q->list_mutex); + + if (active) { + ks_q_wake(q); + } + + return active ? 
KS_STATUS_SUCCESS : KS_STATUS_INACTIVE; +} + +KS_DECLARE(ks_size_t) ks_q_size(ks_q_t *q) +{ + ks_size_t size; + + ks_mutex_lock(q->list_mutex); + size = q->len; + ks_mutex_unlock(q->list_mutex); + + return size; +} + +KS_DECLARE(ks_status_t) ks_q_destroy(ks_q_t **qP) +{ + ks_q_t *q; + ks_pool_t *pool; + + ks_assert(qP); + + q = *qP; + *qP = NULL; + + if (q) { + ks_q_flush(q); + ks_q_term(q); + + pool = q->pool; + ks_pool_free(pool, q); + pool = NULL; + + return KS_STATUS_SUCCESS; + } + + return KS_STATUS_FAIL; +} + +KS_DECLARE(ks_status_t) ks_q_create(ks_q_t **qP, ks_pool_t *pool, ks_size_t maxlen) +{ + ks_q_t *q = NULL; + + q = ks_pool_alloc(pool, sizeof(*q)); + ks_assert(q); + + q->pool = pool; + + + ks_mutex_create(&q->list_mutex, KS_MUTEX_FLAG_DEFAULT, pool); + ks_assert(q->list_mutex); + + ks_cond_create_ex(&q->pop_cond, pool, q->list_mutex); + ks_assert(q->pop_cond); + + ks_cond_create_ex(&q->push_cond, pool, q->list_mutex); + ks_assert(q->push_cond); + + q->maxlen = maxlen; + q->active = 1; + + ks_pool_set_cleanup(pool, q, NULL, 0, ks_q_cleanup); + + *qP = q; + + return KS_STATUS_SUCCESS; +} + +static ks_qnode_t *new_node(ks_q_t *q) +{ + ks_qnode_t *np; + + if (q->empty) { + np = q->empty; + q->empty = q->empty->next; + } else { + np = ks_pool_alloc(q->pool, sizeof(*np)); + } + + np->prev = np->next = NULL; + np->ptr = NULL; + + return np; +} + +static ks_status_t do_push(ks_q_t *q, void *ptr) +{ + ks_qnode_t *node; + + ks_mutex_lock(q->list_mutex); + if (!q->active) { + ks_mutex_unlock(q->list_mutex); + return KS_STATUS_INACTIVE; + } + + node = new_node(q); + node->ptr = ptr; + + if (!q->head) { + q->head = q->tail = node; + } else { + q->tail->next = node; + node->prev = q->tail; + q->tail = node; + } + q->len++; + ks_mutex_unlock(q->list_mutex); + + return KS_STATUS_SUCCESS; +} + + +KS_DECLARE(ks_status_t) ks_q_push(ks_q_t *q, void *ptr) +{ + ks_status_t r; + + ks_mutex_lock(q->list_mutex); + if (q->active == 0) { + r = KS_STATUS_INACTIVE; + goto end; + } + + + if (q->maxlen && q->len == q->maxlen) { + q->pushers++; + ks_cond_wait(q->push_cond); + q->pushers--; + + if (q->maxlen && q->len == q->maxlen) { + if (!q->active) { + r = KS_STATUS_INACTIVE; + } else { + r = KS_STATUS_BREAK; + } + goto end; + } + } + + r = do_push(q, ptr); + + if (q->poppers) { + ks_cond_signal(q->pop_cond); + } + + end: + + ks_mutex_unlock(q->list_mutex); + return r; +} + +KS_DECLARE(ks_status_t) ks_q_trypush(ks_q_t *q, void *ptr) +{ + ks_status_t r; + + ks_mutex_lock(q->list_mutex); + if (q->active == 0) { + r = KS_STATUS_INACTIVE; + goto end; + } + + if (q->maxlen && q->len == q->maxlen) { + r = KS_STATUS_BREAK; + goto end; + } + + r = do_push(q, ptr); + + if (q->poppers) { + ks_cond_signal(q->pop_cond); + } + + end: + + ks_mutex_unlock(q->list_mutex); + + return r; +} + +static ks_status_t do_pop(ks_q_t *q, void **ptr) +{ + ks_qnode_t *np; + + ks_mutex_lock(q->list_mutex); + + if (!q->active) { + ks_mutex_unlock(q->list_mutex); + return KS_STATUS_INACTIVE; + } + + if (!q->head) { + *ptr = NULL; + } else { + np = q->head; + if ((q->head = q->head->next)) { + q->head->prev = NULL; + } + + *ptr = np->ptr; + + np->next = q->empty; + np->prev = NULL; + np->ptr = NULL; + q->empty = np; + } + + q->len--; + ks_mutex_unlock(q->list_mutex); + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_q_pop_timeout(ks_q_t *q, void **ptr, uint32_t timeout) +{ + ks_status_t r; + + ks_mutex_lock(q->list_mutex); + + if (!q->active) { + r = KS_STATUS_INACTIVE; + goto end; + } + + if (q->len == 0) { + if (q->active) 
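+			/* The queue is empty: wait for a pusher (or ks_q_wake()/ks_q_term())
+			   to signal pop_cond.  The wait may return with the queue still
+			   empty, so the length is re-checked below. */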
{ + q->poppers++; + if (timeout) { + r = ks_cond_timedwait(q->pop_cond, timeout); + } else { + r = ks_cond_wait(q->pop_cond); + } + q->poppers--; + + if (timeout && r != KS_STATUS_SUCCESS) { + goto end; + } + } + + if (q->len == 0) { + if (!q->active) { + r = KS_STATUS_INACTIVE; + } else { + r = KS_STATUS_BREAK; + } + goto end; + } + } + + r = do_pop(q, ptr); + + if (q->pushers) { + ks_cond_signal(q->push_cond); + } + + end: + + ks_mutex_unlock(q->list_mutex); + + return r; + +} + +KS_DECLARE(ks_status_t) ks_q_pop(ks_q_t *q, void **ptr) +{ + return ks_q_pop_timeout(q, ptr, 0); +} + +KS_DECLARE(ks_status_t) ks_q_trypop(ks_q_t *q, void **ptr) +{ + ks_status_t r; + + ks_mutex_lock(q->list_mutex); + + if (!q->active) { + r = KS_STATUS_INACTIVE; + goto end; + } + + if (q->len == 0) { + r = KS_STATUS_BREAK; + goto end; + } + + r = do_pop(q, ptr); + + if (q->pushers) { + ks_cond_signal(q->push_cond); + } + + end: + + ks_mutex_unlock(q->list_mutex); + + return r; + +} + + + + +KS_DECLARE(ks_status_t) ks_q_wait(ks_q_t *q) +{ + ks_status_t r = KS_STATUS_SUCCESS; + int done = 0; + + do { + ks_mutex_lock(q->list_mutex); + + if (!q->active) { + r = KS_STATUS_INACTIVE; + done = 1; + } + + if (q->len == 0) { + done = 1; + } + + ks_mutex_unlock(q->list_mutex); + + } while (!done); + + return r; +} + + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_rng.c b/libs/libks/src/ks_rng.c new file mode 100644 index 0000000000..f98bf5cd5a --- /dev/null +++ b/libs/libks/src/ks_rng.c @@ -0,0 +1,251 @@ +/* + * Cross Platform random/uuid abstraction + * Copyright(C) 2015 Michael Jerris + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so. + * + * This work is provided under this license on an "as is" basis, without warranty of any kind, + * either expressed or implied, including, without limitation, warranties that the covered code + * is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire + * risk as to the quality and performance of the covered code is with you. Should any covered + * code prove defective in any respect, you (not the initial developer or any other contributor) + * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty + * constitutes an essential part of this license. No use of any covered code is authorized hereunder + * except under this disclaimer. + * + */ + +#include "ks.h" +#include "sodium.h" +#include +#include + +static ks_bool_t initialized = KS_FALSE; +static ks_mutex_t *rng_mutex = NULL; +static sha512_ctx global_sha512; + +#ifdef __WINDOWS__ +#include +HCRYPTPROV crypt_provider; +#else +int fd = -1; +#endif + +/* + * memset_volatile is a volatile pointer to the memset function. + * You can call (*memset_volatile)(buf, val, len) or even + * memset_volatile(buf, val, len) just as you would call + * memset(buf, val, len), but the use of a volatile pointer + * guarantees that the compiler will not optimise the call away. 
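+ *
+ * For example, (*memset_volatile)(key, 0, sizeof(key)) can be used to wipe
+ * key material before a function returns, where a plain memset() on a
+ * buffer that is never read again may be removed by dead-store elimination.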
+ */ +//static void * (*volatile memset_volatile)(void *, int, size_t) = memset; + +KS_DECLARE(uuid_t *) ks_uuid(uuid_t *uuid) +{ +#ifdef __WINDOWS__ + UuidCreate ( uuid ); +#else + uuid_generate_random ( *uuid ); +#endif + return uuid; +} + +KS_DECLARE(char *) ks_uuid_str(ks_pool_t *pool, uuid_t *uuid) +{ + char *uuidstr = ks_pool_alloc(pool, 37); +#ifdef __WINDOWS__ + unsigned char * str; + UuidToStringA ( uuid, &str ); + uuidstr = ks_pstrdup(pool, str); + RpcStringFreeA ( &str ); +#else + char str[37] = { 0 }; + uuid_unparse ( *uuid, str ); + uuidstr = ks_pstrdup(pool, str); +#endif + return uuidstr; +} + +KS_DECLARE(ks_status_t) ks_rng_init(void) +{ + if (!initialized) { + if (sodium_init() == -1) { + abort(); + } + + + randombytes_random(); + ks_aes_init(); + ks_mutex_create(&rng_mutex, KS_MUTEX_FLAG_DEFAULT, ks_global_pool()); +#ifdef __WINDOWS__ + if (!crypt_provider) { + if (CryptAcquireContext(&crypt_provider, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT) == TRUE) { + initialized = KS_TRUE; + } else { + initialized = KS_FALSE; + } + } +#else + if (fd < 0) { + fd = open("/dev/urandom", O_RDONLY); + if (fd < 0) { + fd = open("/dev/random", O_RDONLY); + } + } + initialized = KS_TRUE; +#endif + } + + sha512_begin(&global_sha512); + + if (initialized) { + return KS_STATUS_SUCCESS; + } else { + return KS_STATUS_FAIL; + } +} + +KS_DECLARE(ks_status_t) ks_rng_shutdown(void) +{ + + initialized = KS_FALSE; +#ifdef __WINDOWS__ + if (crypt_provider) { + CryptReleaseContext(crypt_provider, 0); + crypt_provider = 0; + } +#else + if (fd >= 0) { + close(fd); + fd = -1; + } +#endif + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(size_t) ks_rng_seed_data(uint8_t *seed, size_t length) +{ + size_t bytes = 0; + + if (!initialized && (ks_rng_init() != KS_STATUS_SUCCESS)) { + return bytes; + } +#ifdef __WINDOWS__ + if (crypt_provider) { + if(!CryptGenRandom(crypt_provider, length, seed)) { + return 0; + } + bytes = length; + } +#else + if (fd >= 0) { + bytes = read(fd, seed, length); + } else { + } +#endif + return bytes; +} + +KS_DECLARE(size_t) ks_rng_add_entropy(const uint8_t *buffer, size_t length) +{ + + uint8_t seed[64]; + size_t len = ks_rng_seed_data(seed, sizeof(seed)); + + ks_mutex_lock(rng_mutex); + + if (!initialized) { + ks_rng_init(); + } + + if (buffer && length) { + sha512_hash(buffer, length, &global_sha512); + } + + if (len > 0) { + sha512_hash(seed, len, &global_sha512); + length += len; + } + + ks_mutex_unlock(rng_mutex); + + return length; +} + +KS_DECLARE(size_t) ks_rng_get_data(uint8_t* buffer, size_t length) { + randombytes_buf(buffer, length); + return length; +} + + +#if 0 + +KS_DECLARE(size_t) ks_rng_get_data(uint8_t* buffer, size_t length) { + + aes_encrypt_ctx cx[1]; + sha512_ctx random_context; + uint8_t md[SHA512_DIGEST_SIZE]; + uint8_t ctr[AES_BLOCK_SIZE]; + uint8_t rdata[AES_BLOCK_SIZE]; + size_t generated = length; + + /* Add entropy from system state. 
We will include whatever happens to be in the buffer, it can't hurt */ + ks_rng_add_entropy(buffer, length); + + ks_mutex_lock(rng_mutex); + + /* Copy the mainCtx and finalize it into the md buffer */ + memcpy(&random_context, &global_sha512, sizeof(sha512_ctx)); + sha512_end(md, &random_context); + + ks_mutex_lock(rng_mutex); + + /* Key an AES context from this buffer */ + aes_encrypt_key256(md, cx); + + /* Initialize counter, using excess from md if available */ + memset (ctr, 0, sizeof(ctr)); + uint32_t ctrbytes = AES_BLOCK_SIZE; + memcpy(ctr + sizeof(ctr) - ctrbytes, md + 32, ctrbytes); + + /* Encrypt counter, copy to destination buffer, increment counter */ + while (length) { + uint8_t *ctrptr; + size_t copied; + aes_encrypt(ctr, rdata, cx); + copied = (sizeof(rdata) < length) ? sizeof(rdata) : length; + memcpy (buffer, rdata, copied); + buffer += copied; + length -= copied; + + /* Increment counter */ + ctrptr = ctr + sizeof(ctr) - 1; + while (ctrptr >= ctr) { + if ((*ctrptr-- += 1) != 0) { + break; + } + } + } + memset_volatile(&random_context, 0, sizeof(random_context)); + memset_volatile(md, 0, sizeof(md)); + memset_volatile(&cx, 0, sizeof(cx)); + memset_volatile(ctr, 0, sizeof(ctr)); + memset_volatile(rdata, 0, sizeof(rdata)); + + return generated; +} + +#endif + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_socket.c b/libs/libks/src/ks_socket.c new file mode 100644 index 0000000000..f97c7ba823 --- /dev/null +++ b/libs/libks/src/ks_socket.c @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/* Use select on windows and poll everywhere else. + Select is the devil. Especially if you are doing a lot of small socket connections. + If your FD number is bigger than 1024 you will silently create memory corruption. 
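+   (The 1024 limit comes from the fixed size of fd_set: FD_SETSIZE defaults to
+   1024 on most Unix systems and FD_SET() on a larger descriptor writes outside
+   the bitmap.  That is also why FD_SETSIZE is raised to 8192 below before the
+   Windows headers are included, where the definition is honored.)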
+ + If you have build errors on your platform because you don't have poll find a way to detect it and #define KS_USE_SELECT and #undef KS_USE_POLL + All of this will be upgraded to autoheadache eventually. +*/ + +/* TBD for win32 figure out how to tell if you have WSAPoll (vista or higher) and use it when available by #defining KS_USE_WSAPOLL (see below) */ + +#ifdef _MSC_VER +#define FD_SETSIZE 8192 +//#define KS_USE_SELECT +#else +#define KS_USE_POLL +#endif + +#include + +#ifndef WIN32 +#define closesocket(s) close(s) +#else /* WIN32 */ + +#pragma warning (disable:6386) +/* These warnings need to be ignored warning in sdk header */ +#include +#include +#pragma comment(lib, "Ws2_32.lib") + +#ifndef errno +#define errno WSAGetLastError() +#endif + +#ifndef EINTR +#define EINTR WSAEINTR +#endif + +#pragma warning (default:6386) + +#endif /* WIN32 */ + +#ifdef KS_USE_POLL +#include +#endif + +KS_DECLARE(ks_status_t) ks_socket_option(ks_socket_t socket, int option_name, ks_bool_t enabled) +{ + int result = -1; + ks_status_t status = KS_STATUS_FAIL; +#ifdef WIN32 + BOOL opt = TRUE; + if (!enabled) opt = FALSE; +#else + int opt = 1; + if (!enabled) opt = 0; +#endif + + switch(option_name) { + case SO_REUSEADDR: + case TCP_NODELAY: + case SO_KEEPALIVE: + case SO_LINGER: +#ifdef WIN32 + result = setsockopt(socket, SOL_SOCKET, option_name, (char *) &opt, sizeof(opt)); +#else + result = setsockopt(socket, SOL_SOCKET, option_name, &opt, sizeof(opt)); +#endif + if (!result) status = KS_STATUS_SUCCESS; + break; + case KS_SO_NONBLOCK: + { +#ifdef WIN32 + u_long val = (u_long)!!opt; + if (ioctlsocket(socket, FIONBIO, &val) != SOCKET_ERROR) { + status = KS_STATUS_SUCCESS; + } +#else + int flags = fcntl(socket, F_GETFL, 0); + if (opt) { + flags |= O_NONBLOCK; + } else { + flags &= ~O_NONBLOCK; + } + if (fcntl(socket, F_SETFL, flags) != -1) { + status = KS_STATUS_SUCCESS; + } +#endif + } + break; + default: + break; + } + + return status; +} + +KS_DECLARE(ks_status_t) ks_socket_sndbuf(ks_socket_t socket, int bufsize) +{ + int result; + ks_status_t status = KS_STATUS_FAIL; + +#ifdef WIN32 + result = setsockopt(socket, SOL_SOCKET, SO_SNDBUF, (char *) &bufsize, sizeof(bufsize)); +#else + result = setsockopt(socket, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)); +#endif + if (!result) status = KS_STATUS_SUCCESS; + + return status; +} + +KS_DECLARE(ks_status_t) ks_socket_rcvbuf(ks_socket_t socket, int bufsize) +{ + int result; + ks_status_t status = KS_STATUS_FAIL; + +#ifdef WIN32 + result = setsockopt(socket, SOL_SOCKET, SO_RCVBUF, (char *) &bufsize, sizeof(bufsize)); +#else + result = setsockopt(socket, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof(bufsize)); +#endif + if (!result) status = KS_STATUS_SUCCESS; + + return status; +} + +static int ks_socket_reuseaddr(ks_socket_t socket) +{ +#ifdef WIN32 + BOOL reuse_addr = TRUE; + return setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, (char *) &reuse_addr, sizeof(reuse_addr)); +#else + int reuse_addr = 1; + return setsockopt(socket, SOL_SOCKET, SO_REUSEADDR, &reuse_addr, sizeof(reuse_addr)); +#endif +} + +KS_DECLARE(ks_status_t) ks_socket_shutdown(ks_socket_t sock, int how) +{ + return shutdown(sock, how) ? 
KS_STATUS_FAIL : KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_socket_close(ks_socket_t *sock) +{ + ks_assert(sock); + + if (*sock != KS_SOCK_INVALID) { + closesocket(*sock); + *sock = KS_SOCK_INVALID; + return KS_STATUS_SUCCESS; + } + + return KS_STATUS_FAIL; +} + +KS_DECLARE(ks_socket_t) ks_socket_connect(int type, int protocol, ks_sockaddr_t *addr) +{ + ks_socket_t sock = KS_SOCK_INVALID; + + ks_assert(addr); + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if ((sock = socket(addr->family, type, protocol)) == KS_SOCK_INVALID) { + return KS_SOCK_INVALID; + } + + if (addr->family == AF_INET) { + if (connect(sock, (struct sockaddr *)&addr->v.v4, sizeof(addr->v.v4))) { + ks_socket_close(&sock); + return KS_SOCK_INVALID; + } + } else { + if (connect(sock, (struct sockaddr *)&addr->v.v6, sizeof(addr->v.v6))) { + ks_socket_close(&sock); + return KS_SOCK_INVALID; + } + } + + return sock; +} + +KS_DECLARE(ks_status_t) ks_addr_bind(ks_socket_t server_sock, ks_sockaddr_t *addr) +{ + ks_status_t status = KS_STATUS_SUCCESS; + + ks_assert(addr); + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if (addr->family == AF_INET) { + if (bind(server_sock, (struct sockaddr *) &addr->v.v4, sizeof(addr->v.v4)) < 0) { + status = KS_STATUS_FAIL; + } + } else { + if (bind(server_sock, (struct sockaddr *) &addr->v.v6, sizeof(addr->v.v6)) < 0) { + status = KS_STATUS_FAIL; + } + } + + return status; +} + +KS_DECLARE(const char *) ks_addr_get_host(ks_sockaddr_t *addr) +{ + ks_assert(addr); + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if (addr->family == AF_INET) { + inet_ntop(AF_INET, &addr->v.v4.sin_addr, addr->host, sizeof(addr->host)); + } else { + inet_ntop(AF_INET6, &addr->v.v6.sin6_addr, addr->host, sizeof(addr->host)); + } + + return (const char *) addr->host; +} + +KS_DECLARE(ks_port_t) ks_addr_get_port(ks_sockaddr_t *addr) +{ + ks_assert(addr); + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if (addr->family == AF_INET) { + addr->port = ntohs(addr->v.v4.sin_port); + } else { + addr->port = ntohs(addr->v.v6.sin6_port); + } + + return addr->port; +} + +KS_DECLARE(int) ks_addr_cmp(const ks_sockaddr_t *sa1, const ks_sockaddr_t *sa2) +{ + + if (!(sa1 && sa2)) { + return 0; + } + + if (sa1->family != sa2->family) { + return 0; + } + + switch (sa1->family) { + case AF_INET: + return (sa1->v.v4.sin_addr.s_addr == sa2->v.v4.sin_addr.s_addr && sa1->v.v4.sin_port == sa2->v.v4.sin_port); + case AF_INET6: + { + int i; + + if (sa1->v.v6.sin6_port != sa2->v.v6.sin6_port) { + return 0; + } + + for (i = 0; i < 4; i++) { + if (*((int32_t *) &sa1->v.v6.sin6_addr + i) != *((int32_t *) &sa2->v.v6.sin6_addr + i)) { + return 0; + } + } + + return 1; + } + } + + return 0; +} + +KS_DECLARE(ks_status_t) ks_addr_copy(ks_sockaddr_t *addr, const ks_sockaddr_t *src_addr) +{ + ks_status_t status = KS_STATUS_SUCCESS; + + ks_assert(addr); + ks_assert(src_addr); + ks_assert(src_addr->family == AF_INET || src_addr->family == AF_INET6); + + addr->family = src_addr->family; + + if (src_addr->family == AF_INET) { + memcpy(&addr->v.v4, &src_addr->v.v4, sizeof(src_addr->v.v4)); + } else { + memcpy(&addr->v.v6, &src_addr->v.v6, sizeof(src_addr->v.v6)); + } + + ks_addr_get_host(addr); + ks_addr_get_port(addr); + + return status; +} + + +KS_DECLARE(ks_status_t) ks_addr_set(ks_sockaddr_t *addr, const char *host, ks_port_t port, int family) +{ + ks_status_t status = KS_STATUS_SUCCESS; + + ks_assert(addr); + + if (family != PF_INET && family != PF_INET6) family 
= PF_INET; + if (host && strchr(host, ':')) family = PF_INET6; + + memset(addr, 0, sizeof(*addr)); + + if (family == PF_INET) { + addr->family = AF_INET; + addr->v.v4.sin_family = AF_INET; + addr->v.v4.sin_addr.s_addr = host ? inet_addr(host): htonl(INADDR_ANY); + addr->v.v4.sin_port = htons(port); + } else { + addr->family = AF_INET6; + addr->v.v6.sin6_family = AF_INET6; + addr->v.v6.sin6_port = htons(port); + if (host) { + inet_pton(AF_INET6, host, &(addr->v.v6.sin6_addr)); + } else { + addr->v.v6.sin6_addr = in6addr_any; + } + } + + ks_addr_get_host(addr); + ks_addr_get_port(addr); + + return status; +} + + +KS_DECLARE(ks_status_t) ks_addr_set_raw(ks_sockaddr_t *addr, void *data, ks_port_t port, int family) +{ + ks_status_t status = KS_STATUS_SUCCESS; + + ks_assert(addr); + + if (family != PF_INET && family != PF_INET6) family = PF_INET; + + memset(addr, 0, sizeof(*addr)); + + if (family == PF_INET) { + addr->family = AF_INET; + addr->v.v4.sin_family = AF_INET; + memcpy(&(addr->v.v4.sin_addr), data, 4); + addr->v.v4.sin_port = port; + } else { + addr->family = AF_INET6; + addr->v.v6.sin6_family = AF_INET6; + addr->v.v6.sin6_port = port; + memcpy(&(addr->v.v6.sin6_addr), data, 16); + } + + ks_addr_get_host(addr); + ks_addr_get_port(addr); + + return status; +} + + +KS_DECLARE(ks_status_t) ks_listen_sock(ks_socket_t server_sock, ks_sockaddr_t *addr, int backlog, ks_listen_callback_t callback, void *user_data) +{ + ks_status_t status = KS_STATUS_SUCCESS; + + + ks_socket_reuseaddr(server_sock); + + if (ks_addr_bind(server_sock, addr) != KS_STATUS_SUCCESS) { + status = KS_STATUS_FAIL; + goto end; + } + + if (!backlog) backlog = 10000; + + if (listen(server_sock, backlog) < 0) { + status = KS_STATUS_FAIL; + goto end; + } + + for (;;) { + ks_socket_t client_sock; + ks_sockaddr_t remote_addr; + socklen_t slen = 0; + + if (addr->family == PF_INET) { + slen = sizeof(remote_addr.v.v4); + if ((client_sock = accept(server_sock, (struct sockaddr *) &remote_addr.v.v4, &slen)) == KS_SOCK_INVALID) { + status = KS_STATUS_FAIL; + goto end; + } + remote_addr.family = AF_INET; + } else { + slen = sizeof(remote_addr.v.v6); + if ((client_sock = accept(server_sock, (struct sockaddr *) &remote_addr.v.v6, &slen)) == KS_SOCK_INVALID) { + status = KS_STATUS_FAIL; + goto end; + } + remote_addr.family = AF_INET6; + } + + ks_addr_get_host(&remote_addr); + ks_addr_get_port(&remote_addr); + + callback(server_sock, client_sock, &remote_addr, user_data); + } + + end: + + if (server_sock != KS_SOCK_INVALID) { + ks_socket_shutdown(server_sock, 2); + ks_socket_close(&server_sock); + server_sock = KS_SOCK_INVALID; + } + + return status; +} + +KS_DECLARE(ks_status_t) ks_listen(const char *host, ks_port_t port, int family, int backlog, ks_listen_callback_t callback, void *user_data) +{ + ks_socket_t server_sock = KS_SOCK_INVALID; + ks_sockaddr_t addr = { 0 }; + + if (family != PF_INET && family != PF_INET6) family = PF_INET; + if (host && strchr(host, ':')) family = PF_INET6; + + if (ks_addr_set(&addr, host, port, family) != KS_STATUS_SUCCESS) { + return KS_STATUS_FAIL; + } + + if ((server_sock = socket(family, SOCK_STREAM, IPPROTO_TCP)) == KS_SOCK_INVALID) { + return KS_STATUS_FAIL; + } + + return ks_listen_sock(server_sock, &addr, backlog, callback, user_data); +} + +KS_DECLARE(int) ks_poll(struct pollfd fds[], uint32_t nfds, int timeout) +{ +#ifdef WIN32 + return WSAPoll(fds, nfds, timeout); +#else + return poll(fds, nfds, timeout); +#endif +} + +#ifdef KS_USE_SELECT +#ifdef WIN32 +#pragma warning( push ) +#pragma warning( 
disable : 6262 ) /* warning C6262: Function uses '98348' bytes of stack: exceeds /analyze:stacksize'16384'. Consider moving some data to heap */ +#endif +KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags) +{ + int s = 0, r = 0; + fd_set rfds; + fd_set wfds; + fd_set efds; + struct timeval tv; + + FD_ZERO(&rfds); + FD_ZERO(&wfds); + FD_ZERO(&efds); + +#ifndef WIN32 + /* Wouldn't you rather know?? */ + assert(sock <= FD_SETSIZE); +#endif + + if ((flags & KS_POLL_READ)) { + +#ifdef WIN32 +#pragma warning( push ) +#pragma warning( disable : 4127 ) +#pragma warning( disable : 4548 ) + FD_SET(sock, &rfds); +#pragma warning( pop ) +#else + FD_SET(sock, &rfds); +#endif + } + + if ((flags & KS_POLL_WRITE)) { + +#ifdef WIN32 +#pragma warning( push ) +#pragma warning( disable : 4127 ) +#pragma warning( disable : 4548 ) + FD_SET(sock, &wfds); +#pragma warning( pop ) +#else + FD_SET(sock, &wfds); +#endif + } + + if ((flags & KS_POLL_ERROR)) { + +#ifdef WIN32 +#pragma warning( push ) +#pragma warning( disable : 4127 ) +#pragma warning( disable : 4548 ) + FD_SET(sock, &efds); +#pragma warning( pop ) +#else + FD_SET(sock, &efds); +#endif + } + + tv.tv_sec = ms / 1000; + tv.tv_usec = (ms % 1000) * 1000; + + s = select((int)sock + 1, (flags & KS_POLL_READ) ? &rfds : NULL, (flags & KS_POLL_WRITE) ? &wfds : NULL, (flags & KS_POLL_ERROR) ? &efds : NULL, &tv); + + if (s < 0) { + r = s; + } else if (s > 0) { + if ((flags & KS_POLL_READ) && FD_ISSET(sock, &rfds)) { + r |= KS_POLL_READ; + } + + if ((flags & KS_POLL_WRITE) && FD_ISSET(sock, &wfds)) { + r |= KS_POLL_WRITE; + } + + if ((flags & KS_POLL_ERROR) && FD_ISSET(sock, &efds)) { + r |= KS_POLL_ERROR; + } + } + + return r; + +} + +#ifdef WIN32 +#pragma warning( pop ) +#endif +#endif + +#if defined(KS_USE_POLL) || defined(WIN32) +KS_DECLARE(int) ks_wait_sock(ks_socket_t sock, uint32_t ms, ks_poll_t flags) +{ + struct pollfd pfds[2] = { {0} }; + int s = 0, r = 0; + + pfds[0].fd = sock; + + if ((flags & KS_POLL_READ)) { + pfds[0].events |= POLLIN; + } + + if ((flags & KS_POLL_WRITE)) { + pfds[0].events |= POLLOUT; + } + + if ((flags & KS_POLL_ERROR)) { + pfds[0].events |= POLLERR; + } + + s = ks_poll(pfds, 1, ms); + + if (s < 0) { + r = s; + } else if (s > 0) { + if ((pfds[0].revents & POLLIN)) { + r |= KS_POLL_READ; + } + if ((pfds[0].revents & POLLOUT)) { + r |= KS_POLL_WRITE; + } + if ((pfds[0].revents & POLLERR)) { + r |= KS_POLL_ERROR; + } + } + + return r; + +} +#endif + + +#ifdef HAVE_GETIFADDRS +#include <ifaddrs.h> +static int get_netmask(struct sockaddr_in *me, int *mask) +{ + struct ifaddrs *ifaddrs, *i = NULL; + + if (!me || getifaddrs(&ifaddrs) < 0) { + return -1; + } + + for (i = ifaddrs; i; i = i->ifa_next) { + struct sockaddr_in *s = (struct sockaddr_in *) i->ifa_addr; + struct sockaddr_in *m = (struct sockaddr_in *) i->ifa_netmask; + + if (s && m && s->sin_family == AF_INET && s->sin_addr.s_addr == me->sin_addr.s_addr) { + *mask = m->sin_addr.s_addr; + freeifaddrs(ifaddrs); + return 0; + } + } + + freeifaddrs(ifaddrs); + + return -2; +} +#elif defined(__linux__) + +#include <sys/ioctl.h> +#include <net/if.h> +static int get_netmask(struct sockaddr_in *me, int *mask) +{ + + static struct ifreq ifreqs[20] = { {{{0}}} }; + struct ifconf ifconf; + int nifaces, i; + int sock; + int r = -1; + + memset(&ifconf, 0, sizeof(ifconf)); + ifconf.ifc_buf = (char *) (ifreqs); + ifconf.ifc_len = sizeof(ifreqs); + + + if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + goto end; + } + + if (ioctl(sock, SIOCGIFCONF, (char *) &ifconf) < 0) { + goto end; + } + + nifaces =
ifconf.ifc_len / sizeof(struct ifreq); + + for (i = 0; i < nifaces; i++) { + struct sockaddr_in *sin = NULL; + struct in_addr ip; + + ioctl(sock, SIOCGIFADDR, &ifreqs[i]); + sin = (struct sockaddr_in *) &ifreqs[i].ifr_addr; + ip = sin->sin_addr; + + if (ip.s_addr == me->sin_addr.s_addr) { + ioctl(sock, SIOCGIFNETMASK, &ifreqs[i]); + sin = (struct sockaddr_in *) &ifreqs[i].ifr_addr; + /* mask = sin->sin_addr; */ + *mask = sin->sin_addr.s_addr; + r = 0; + break; + } + + } + + end: + + close(sock); + return r; + +} + +#elif defined(WIN32) + +static int get_netmask(struct sockaddr_in *me, int *mask) +{ + SOCKET sock = WSASocket(AF_INET, SOCK_DGRAM, 0, 0, 0, 0); + INTERFACE_INFO interfaces[20]; + unsigned long bytes; + int interface_count, x; + int r = -1; + + *mask = 0; + + if (sock == SOCKET_ERROR) { + return -1; + } + + if (WSAIoctl(sock, SIO_GET_INTERFACE_LIST, 0, 0, &interfaces, sizeof(interfaces), &bytes, 0, 0) == SOCKET_ERROR) { + r = -1; + goto end; + } + + interface_count = bytes / sizeof(INTERFACE_INFO); + + for (x = 0; x < interface_count; ++x) { + struct sockaddr_in *addr = (struct sockaddr_in *) &(interfaces[x].iiAddress); + + if (addr->sin_addr.s_addr == me->sin_addr.s_addr) { + struct sockaddr_in *netmask = (struct sockaddr_in *) &(interfaces[x].iiNetmask); + *mask = netmask->sin_addr.s_addr; + r = 0; + break; + } + } + + end: + closesocket(sock); + return r; +} + +#else + +static int get_netmask(struct sockaddr_in *me, int *mask) +{ + return -1; +} + +#endif + + +KS_DECLARE(ks_status_t) ks_ip_route(char *buf, int len, const char *route_ip) +{ + int family = AF_INET; + + ks_assert(route_ip); + + if (strchr(route_ip, ':')) { + family = AF_INET6; + } + + return ks_find_local_ip(buf, len, NULL, family, route_ip); +} + +KS_DECLARE(ks_status_t) ks_find_local_ip(char *buf, int len, int *mask, int family, const char *route_ip) +{ + ks_status_t status = KS_STATUS_FAIL; + char *base = (char *)route_ip; + +#ifdef WIN32 + SOCKET tmp_socket; + SOCKADDR_STORAGE l_address; + int l_address_len; + struct addrinfo *address_info = NULL; +#else +#ifdef __Darwin__ + int ilen; +#else + unsigned int ilen; +#endif + int tmp_socket = -1, on = 1; + char abuf[25] = ""; +#endif + + if (len < 16) { + return status; + } + + switch (family) { + case AF_INET: + ks_copy_string(buf, "127.0.0.1", len); + if (!base) { + base = "82.45.148.209"; + } + break; + case AF_INET6: + ks_copy_string(buf, "::1", len); + if (!base) { + base = "2001:503:BA3E::2:30"; /* DNS Root server A */ + } + break; + default: + base = "127.0.0.1"; + break; + } + +#ifdef WIN32 + tmp_socket = socket(family, SOCK_DGRAM, 0); + + getaddrinfo(base, NULL, NULL, &address_info); + + if (!address_info || WSAIoctl(tmp_socket, + SIO_ROUTING_INTERFACE_QUERY, + address_info->ai_addr, (DWORD) address_info->ai_addrlen, &l_address, sizeof(l_address), (LPDWORD) & l_address_len, NULL, + NULL)) { + + closesocket(tmp_socket); + if (address_info) + freeaddrinfo(address_info); + return status; + } + + + closesocket(tmp_socket); + freeaddrinfo(address_info); + + if (!getnameinfo((const struct sockaddr *) &l_address, l_address_len, buf, len, NULL, 0, NI_NUMERICHOST)) { + status = KS_STATUS_SUCCESS; + if (mask && family == AF_INET) { + get_netmask((struct sockaddr_in *) &l_address, mask); + } + } +#else + + switch (family) { + case AF_INET: + { + struct sockaddr_in iface_out; + struct sockaddr_in remote; + memset(&remote, 0, sizeof(struct sockaddr_in)); + + remote.sin_family = AF_INET; + remote.sin_addr.s_addr = inet_addr(base); + remote.sin_port = htons(4242); + + 
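+ /* Route discovery trick: connect() on a UDP socket sends no packets, but it does make the kernel pick a route toward the reference address; getsockname() below then reveals the local address of the interface that route uses. */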
memset(&iface_out, 0, sizeof(iface_out)); + if ( (tmp_socket = socket(AF_INET, SOCK_DGRAM, 0)) == -1 ) { + goto doh; + } + + if (setsockopt(tmp_socket, SOL_SOCKET, SO_BROADCAST, &on, sizeof(on)) == -1) { + goto doh; + } + + if (connect(tmp_socket, (struct sockaddr *) &remote, sizeof(struct sockaddr_in)) == -1) { + goto doh; + } + + ilen = sizeof(iface_out); + if (getsockname(tmp_socket, (struct sockaddr *) &iface_out, &ilen) == -1) { + goto doh; + } + + if (iface_out.sin_addr.s_addr == 0) { + goto doh; + } + + getnameinfo((struct sockaddr *) &iface_out, sizeof(iface_out), abuf, sizeof(abuf), NULL, 0, NI_NUMERICHOST); + ks_copy_string(buf, abuf, len); + + if (mask && family == AF_INET) { + get_netmask((struct sockaddr_in *) &iface_out, mask); + } + + status = KS_STATUS_SUCCESS; + } + break; + case AF_INET6: + { + struct sockaddr_in6 iface_out; + struct sockaddr_in6 remote; + memset(&remote, 0, sizeof(struct sockaddr_in6)); + + remote.sin6_family = AF_INET6; + inet_pton(AF_INET6, base, &remote.sin6_addr); + remote.sin6_port = htons(4242); + + memset(&iface_out, 0, sizeof(iface_out)); + if ( (tmp_socket = socket(AF_INET6, SOCK_DGRAM, 0)) == -1 ) { + goto doh; + } + + if (connect(tmp_socket, (struct sockaddr *) &remote, sizeof(remote)) == -1) { + goto doh; + } + + ilen = sizeof(iface_out); + if (getsockname(tmp_socket, (struct sockaddr *) &iface_out, &ilen) == -1) { + goto doh; + } + + inet_ntop(AF_INET6, (const void *) &iface_out.sin6_addr, buf, len - 1); + + status = KS_STATUS_SUCCESS; + } + break; + } + + doh: + if (tmp_socket > 0) { + close(tmp_socket); + } +#endif + + return status; +} + + +KS_DECLARE(ks_status_t) ks_addr_raw_data(const ks_sockaddr_t *addr, void **data, ks_size_t *datalen) +{ + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if (addr->family == AF_INET) { + *data = (void *)&addr->v.v4.sin_addr; + *datalen = 4; + } else { + *data = (void *)&addr->v.v6.sin6_addr; + *datalen = 16; + } + + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_socket_send(ks_socket_t sock, void *data, ks_size_t *datalen) +{ + ks_ssize_t r; + ks_status_t status = KS_STATUS_FAIL; + + do { +#ifdef WIN32 + r = send(sock, data, (int)*datalen, 0); +#else + r = send(sock, data, *datalen, 0); +#endif + } while (r == -1 && ks_errno_is_interupt(ks_errno())); + + if (r > 0) { + *datalen = (ks_size_t) r; + status = KS_STATUS_SUCCESS; + } else if (r == 0) { + status = KS_STATUS_DISCONNECTED; + + } else if (ks_errno_is_blocking(ks_errno())) { + status = KS_STATUS_BREAK; + } + +return status; +} + +KS_DECLARE(ks_status_t) ks_socket_recv(ks_socket_t sock, void *data, ks_size_t *datalen) +{ + ks_ssize_t r; + ks_status_t status = KS_STATUS_FAIL; + + do { +#ifdef WIN32 + r = recv(sock, data, (int)*datalen, 0); +#else + r = recv(sock, data, *datalen, 0); +#endif + } while (r == -1 && ks_errno_is_interupt(ks_errno())); + + if (r > 0) { + *datalen = (ks_size_t) r; + status = KS_STATUS_SUCCESS; + } else if (r == 0) { + status = KS_STATUS_DISCONNECTED; + } else if (ks_errno_is_blocking(ks_errno())) { + status = KS_STATUS_BREAK; + } + + return status; +} + +KS_DECLARE(ks_status_t) ks_socket_sendto(ks_socket_t sock, void *data, ks_size_t *datalen, ks_sockaddr_t *addr) +{ + struct sockaddr *sockaddr; + socklen_t socksize = 0; + ks_status_t status = KS_STATUS_FAIL; + ks_ssize_t r; + + ks_assert(addr); + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if (addr->family == AF_INET) { + sockaddr = (struct sockaddr *) &addr->v.v4; + socksize = sizeof(addr->v.v4); + } else { + 
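+ /* AF_INET6: hand sendto() the IPv6 sockaddr and its true length. */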
sockaddr = (struct sockaddr *) &addr->v.v6; + socksize = sizeof(addr->v.v6); + } + + do { +#ifdef WIN32 + r = sendto(sock, data, (int)*datalen, 0, sockaddr, socksize); +#else + r = sendto(sock, data, *datalen, 0, sockaddr, socksize); +#endif + } while (r == -1 && ks_errno_is_interupt(ks_errno())); + + if (r > 0) { + *datalen = (ks_size_t) r; + status = KS_STATUS_SUCCESS; + } else if (r == 0) { + status = KS_STATUS_DISCONNECTED; + } else if (ks_errno_is_blocking(ks_errno())) { + status = KS_STATUS_BREAK; + } + + return status; + +} + +KS_DECLARE(ks_status_t) ks_socket_recvfrom(ks_socket_t sock, void *data, ks_size_t *datalen, ks_sockaddr_t *addr) +{ + struct sockaddr *sockaddr; + ks_status_t status = KS_STATUS_FAIL; + ks_ssize_t r; + socklen_t alen; + + ks_assert(addr); + ks_assert(addr->family == AF_INET || addr->family == AF_INET6); + + if (addr->family == AF_INET) { + sockaddr = (struct sockaddr *) &addr->v.v4; + alen = sizeof(addr->v.v4); + } else { + sockaddr = (struct sockaddr *) &addr->v.v6; + alen = sizeof(addr->v.v6); + } + + do { +#ifdef WIN32 + r = recvfrom(sock, data, (int)*datalen, 0, sockaddr, &alen); +#else + r = recvfrom(sock, data, *datalen, 0, sockaddr, &alen); +#endif + } while (r == -1 && ks_errno_is_interupt(ks_errno())); + + if (r > 0) { + ks_addr_get_host(addr); + ks_addr_get_port(addr); + *datalen = (ks_size_t) r; + status = KS_STATUS_SUCCESS; + } else if (r == 0) { + status = KS_STATUS_DISCONNECTED; + } else if (ks_errno_is_blocking(ks_errno())) { + status = KS_STATUS_BREAK; + } + + return status; +} + diff --git a/libs/libks/src/ks_ssl.c b/libs/libks/src/ks_ssl.c new file mode 100644 index 0000000000..763bfd3051 --- /dev/null +++ b/libs/libks/src/ks_ssl.c @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +static ks_mutex_t **ssl_mutexes; +static ks_pool_t *ssl_pool = NULL; +static int ssl_count = 0; +static int is_init = 0; + +static inline void ks_ssl_lock_callback(int mode, int type, char *file, int line) +{ + if (mode & CRYPTO_LOCK) { + ks_mutex_lock(ssl_mutexes[type]); + } + else { + ks_mutex_unlock(ssl_mutexes[type]); + } +} + +static inline unsigned long ks_ssl_thread_id(void) +{ + return (unsigned long) ks_thread_self(); +} + +KS_DECLARE(void) ks_ssl_init_ssl_locks(void) +{ + + int i, num; + + if (is_init) return; + + is_init = 1; + + SSL_library_init(); + + if (ssl_count == 0) { + num = CRYPTO_num_locks(); + + ssl_mutexes = OPENSSL_malloc(CRYPTO_num_locks() * sizeof(ks_mutex_t*)); + ks_assert(ssl_mutexes != NULL); + + ks_pool_open(&ssl_pool); + + for (i = 0; i < num; i++) { + ks_mutex_create(&(ssl_mutexes[i]), KS_MUTEX_FLAG_DEFAULT, ssl_pool); + ks_assert(ssl_mutexes[i] != NULL); + } + + CRYPTO_set_id_callback(ks_ssl_thread_id); + CRYPTO_set_locking_callback((void (*)(int, int, const char*, int))ks_ssl_lock_callback); + } + + ssl_count++; +} + +KS_DECLARE(void) ks_ssl_destroy_ssl_locks(void) +{ + int i; + + if (!is_init) return; + + is_init = 0; + + if (ssl_count == 1) { + CRYPTO_set_locking_callback(NULL); + for (i = 0; i < CRYPTO_num_locks(); i++) { + if (ssl_mutexes[i]) { + ks_mutex_destroy(&ssl_mutexes[i]); + } + } + + OPENSSL_free(ssl_mutexes); + ssl_count--; + } +} + + + +static int mkcert(X509 **x509p, EVP_PKEY **pkeyp, int bits, int serial, int days); + +KS_DECLARE(int) ks_gen_cert(const char *dir, const char *file) +{ + //BIO *bio_err; + X509 *x509 = NULL; + EVP_PKEY *pkey = NULL; + char *rsa = NULL, *pvt = NULL; + FILE *fp; + char *pem = NULL; + + if (ks_stristr(".pem", file)) { + pem = ks_mprintf("%s%s%s", dir, KS_PATH_SEPARATOR, file); + } else { + pvt = ks_mprintf("%s%s%s.key", dir, KS_PATH_SEPARATOR, file); + rsa = ks_mprintf("%s%s%s.crt", dir, KS_PATH_SEPARATOR, file); + } + + CRYPTO_mem_ctrl(CRYPTO_MEM_CHECK_ON); + + //bio_err=BIO_new_fp(stderr, BIO_NOCLOSE); + + mkcert(&x509, &pkey, 1024, 0, 36500); + + //RSA_print_fp(stdout, pkey->pkey.rsa, 0); + //X509_print_fp(stdout, x509); + + if (pem) { + if ((fp = fopen(pem, "w"))) { + PEM_write_PrivateKey(fp, pkey, NULL, NULL, 0, NULL, NULL); + PEM_write_X509(fp, x509); + fclose(fp); + } + + } else { + if (pvt && (fp = fopen(pvt, "w"))) { + PEM_write_PrivateKey(fp, pkey, NULL, NULL, 0, NULL, NULL); + fclose(fp); + } + + if (rsa && (fp = fopen(rsa, "w"))) { + PEM_write_X509(fp, x509); + fclose(fp); + } + } + + X509_free(x509); + EVP_PKEY_free(pkey); + +#ifndef OPENSSL_NO_ENGINE + ENGINE_cleanup(); +#endif + CRYPTO_cleanup_all_ex_data(); + + //CRYPTO_mem_leaks(bio_err); + //BIO_free(bio_err); + + + ks_safe_free(pvt); + ks_safe_free(rsa); + ks_safe_free(pem); + + return(0); +} + +static int mkcert(X509 **x509p, EVP_PKEY **pkeyp, int bits, int serial, int days) +{ + X509 *x; + EVP_PKEY *pk; + RSA *rsa; + X509_NAME *name=NULL; + + ks_assert(pkeyp); + ks_assert(x509p); + + if (*pkeyp == NULL) { + if ((pk = EVP_PKEY_new()) == NULL) { + abort(); + } + } else { + pk = *pkeyp; + } + + if (*x509p == NULL) { + if ((x = X509_new()) == NULL) { + goto err; + } + } else { + x = *x509p; + } + + rsa = RSA_generate_key(bits, RSA_F4, NULL, NULL); + + if (!EVP_PKEY_assign_RSA(pk, rsa)) { + abort(); + goto err; + } + + rsa = NULL; + + X509_set_version(x, 0); + ASN1_INTEGER_set(X509_get_serialNumber(x), serial); + X509_gmtime_adj(X509_get_notBefore(x), -(long)60*60*24*7); + X509_gmtime_adj(X509_get_notAfter(x), 
(long)60*60*24*days); + X509_set_pubkey(x, pk); + + name = X509_get_subject_name(x); + + /* This function creates and adds the entry, working out the + * correct string type and performing checks on its length. + * Normally we'd check the return value for errors... + */ + X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, (unsigned char *)"US", -1, -1, 0); + X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, (unsigned char *)"FreeSWITCH-libKS", -1, -1, 0); + + + /* Its self signed so set the issuer name to be the same as the + * subject. + */ + X509_set_issuer_name(x, name); + + if (!X509_sign(x, pk, EVP_sha1())) + goto err; + + *x509p = x; + *pkeyp = pk; + return(1); + err: + return(0); +} + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_string.c b/libs/libks/src/ks_string.c new file mode 100644 index 0000000000..039398f8fe --- /dev/null +++ b/libs/libks/src/ks_string.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +/* Written by Marc Espie, public domain */ +#define KS_CTYPE_NUM_CHARS 256 + +const short _ks_C_toupper_[1 + KS_CTYPE_NUM_CHARS] = { + EOF, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', + 'X', 'Y', 'Z', 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff +}; + +const short *_ks_toupper_tab_ = _ks_C_toupper_; + +KS_DECLARE(int) ks_toupper(int c) +{ + if ((unsigned int) c > 255) + return (c); + if (c < -1) + return EOF; + return ((_ks_toupper_tab_ + 1)[c]); +} + +const short _ks_C_tolower_[1 + KS_CTYPE_NUM_CHARS] = { + EOF, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', + 'x', 'y', 'z', 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 
0xfb, 0xfc, 0xfd, 0xfe, 0xff +}; + +const short *_ks_tolower_tab_ = _ks_C_tolower_; + +KS_DECLARE(int) ks_tolower(int c) +{ + if ((unsigned int) c > 255) + return (c); + if (c < -1) + return EOF; + return ((_ks_tolower_tab_ + 1)[c]); +} + +KS_DECLARE(const char *) ks_stristr(const char *instr, const char *str) +{ +/* +** Rev History: 16/07/97 Greg Thayer Optimized +** 07/04/95 Bob Stout ANSI-fy +** 02/03/94 Fred Cole Original +** 09/01/03 Bob Stout Bug fix (lines 40-41) per Fred Bulback +** +** Hereby donated to public domain. +*/ + const char *pptr, *sptr, *start; + + if (!str || !instr) + return NULL; + + for (start = str; *start; start++) { + /* find start of pattern in string */ + for (; ((*start) && (ks_toupper(*start) != ks_toupper(*instr))); start++); + + if (!*start) + return NULL; + + pptr = instr; + sptr = start; + + while (ks_toupper(*sptr) == ks_toupper(*pptr)) { + sptr++; + pptr++; + + /* if end of pattern then pattern was found */ + if (!*pptr) + return (start); + + if (!*sptr) + return NULL; + } + } + return NULL; +} + +#ifdef WIN32 +#ifndef vsnprintf +#define vsnprintf _vsnprintf +#endif +#endif + + +int vasprintf(char **ret, const char *format, va_list ap); + +KS_DECLARE(int) ks_vasprintf(char **ret, const char *fmt, va_list ap) +{ +#if !defined(WIN32) && !defined(__sun) + return vasprintf(ret, fmt, ap); +#else + char *buf; + int len; + size_t buflen; + va_list ap2; + char *tmp = NULL; + +#ifdef _MSC_VER +#if _MSC_VER >= 1500 + /* hack for incorrect assumption in msvc header files for code analysis */ + __analysis_assume(tmp); +#endif + ap2 = ap; +#else + va_copy(ap2, ap); +#endif + + len = vsnprintf(tmp, 0, fmt, ap2); + + if (len > 0 && (buf = malloc((buflen = (size_t) (len + 1)))) != NULL) { + len = vsnprintf(buf, buflen, fmt, ap); + *ret = buf; + } else { + *ret = NULL; + len = -1; + } + + va_end(ap2); + return len; +#endif +} + + +KS_DECLARE(int) ks_snprintf(char *buffer, size_t count, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vsnprintf(buffer, count - 1, fmt, ap); + if (ret < 0) + buffer[count - 1] = '\0'; + va_end(ap); + return ret; +} + + +KS_DECLARE(unsigned int) ks_separate_string(char *buf, const char *delim, char **array, unsigned int arraylen) +{ + unsigned int count = 0; + char *d; + size_t dlen = strlen(delim); + + array[count++] = buf; + + while (count < arraylen && array[count - 1]) { + if ((d = strstr(array[count - 1], delim))) { + *d = '\0'; + d += dlen; + array[count++] = d; + } else + break; + } + + return count; +} + +KS_DECLARE(char *) ks_copy_string(char *from_str, const char *to_str, ks_size_t from_str_len) +{ + char *p, *e; + + if (!from_str) + return NULL; + if (!to_str) { + *from_str = '\0'; + return from_str; + } + + e = from_str + from_str_len - 1; + + for (p = from_str; p < e; ++p, ++to_str) { + if (!(*p = *to_str)) { + return p; + } + } + + *p = '\0'; + + return p; +} diff --git a/libs/libks/src/ks_thread.c b/libs/libks/src/ks_thread.c new file mode 100644 index 0000000000..63030b81f4 --- /dev/null +++ b/libs/libks/src/ks_thread.c @@ -0,0 +1,285 @@ +/* + * Cross Platform Thread/Mutex abstraction + * Copyright(C) 2007 Michael Jerris + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so. 
+ * + * This work is provided under this license on an "as is" basis, without warranty of any kind, + * either expressed or implied, including, without limitation, warranties that the covered code + * is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire + * risk as to the quality and performance of the covered code is with you. Should any covered + * code prove defective in any respect, you (not the initial developer or any other contributor) + * assume the cost of any necessary servicing, repair or correction. This disclaimer of warranty + * constitutes an essential part of this license. No use of any covered code is authorized hereunder + * except under this disclaimer. + * + */ + +#include "ks.h" + +size_t thread_default_stacksize = 240 * 1024; + +#ifndef WIN32 +pthread_once_t init_priority = PTHREAD_ONCE_INIT; +#endif + +KS_DECLARE(ks_thread_os_handle_t) ks_thread_os_handle(ks_thread_t *thread) +{ + return thread->handle; +} + +KS_DECLARE(ks_thread_os_handle_t) ks_thread_self(void) +{ +#ifdef WIN32 + return GetCurrentThread(); +#else + return pthread_self(); +#endif +} + +static void ks_thread_init_priority(void) +{ +#ifdef WIN32 + SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS); +#else +#ifdef USE_SCHED_SETSCHEDULER + /* + * Try to use a round-robin scheduler + * with a fallback if that does not work + */ + struct sched_param sched = { 0 }; + sched.sched_priority = KS_PRI_LOW; + if (sched_setscheduler(0, SCHED_FIFO, &sched)) { + sched.sched_priority = 0; + if (sched_setscheduler(0, SCHED_OTHER, &sched)) { + return; + } + } +#endif +#endif + return; +} + +void ks_thread_override_default_stacksize(size_t size) +{ + thread_default_stacksize = size; +} + +static void ks_thread_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ + ks_thread_t *thread = (ks_thread_t *) ptr; + + switch(action) { + case KS_MPCL_ANNOUNCE: + thread->running = 0; + break; + case KS_MPCL_TEARDOWN: + if (!(thread->flags & KS_THREAD_FLAG_DETATCHED)) { + ks_thread_join(thread); + } + break; + case KS_MPCL_DESTROY: +#ifdef WIN32 + if (!(thread->flags & KS_THREAD_FLAG_DETATCHED)) { + CloseHandle(thread->handle); + } +#endif + break; + } +} + +static void *KS_THREAD_CALLING_CONVENTION thread_launch(void *args) +{ + ks_thread_t *thread = (ks_thread_t *) args; + +#ifdef HAVE_PTHREAD_SETSCHEDPARAM + if (thread->priority) { + int policy = SCHED_FIFO; + struct sched_param param = { 0 }; + pthread_t tt = pthread_self(); + + pthread_once(&init_priority, ks_thread_init_priority); + pthread_getschedparam(tt, &policy, ¶m); + param.sched_priority = thread->priority; + pthread_setschedparam(tt, policy, ¶m); + } +#endif + + thread->return_data = thread->function(thread, thread->private_data); +#ifndef WIN32 + pthread_attr_destroy(&thread->attribute); +#endif + + return thread->return_data; +} + +KS_DECLARE(int) ks_thread_set_priority(int nice_val) +{ +#ifdef WIN32 + SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS); +#else +#ifdef USE_SCHED_SETSCHEDULER + /* + * Try to use a round-robin scheduler + * with a fallback if that does not work + */ + struct sched_param sched = { 0 }; + sched.sched_priority = KS_PRI_LOW; + if (sched_setscheduler(0, SCHED_FIFO, &sched)) { + sched.sched_priority = 0; + if (sched_setscheduler(0, SCHED_OTHER, &sched)) { + return -1; + } + } +#endif + + if (nice_val) { +#ifdef HAVE_SETPRIORITY + /* + * setpriority() works on FreeBSD (6.2), nice() doesn't + */ + if 
(setpriority(PRIO_PROCESS, getpid(), nice_val) < 0) { + ks_log(KS_LOG_CRIT, "Could not set nice level\n"); + return -1; + } +#else + if (nice(nice_val) != nice_val) { + ks_log(KS_LOG_CRIT, "Could not set nice level\n"); + return -1; + } +#endif + } +#endif + + return 0; +} + +KS_DECLARE(uint8_t) ks_thread_priority(ks_thread_t *thread) { + uint8_t priority = 0; +#ifdef WIN32 + DWORD pri = GetThreadPriority(thread->handle); + + if (pri >= THREAD_PRIORITY_TIME_CRITICAL) { + priority = 99; + } else if (pri >= THREAD_PRIORITY_ABOVE_NORMAL) { + priority = 50; + } else { + priority = 10; + } +#else + int policy; + struct sched_param param = { 0 }; + + pthread_getschedparam(thread->handle, &policy, ¶m); + priority = param.sched_priority; +#endif + return priority; +} + +KS_DECLARE(ks_status_t) ks_thread_join(ks_thread_t *thread) { +#ifdef WIN32 + WaitForSingleObject(thread->handle, INFINITE); +#else + void *ret; + pthread_join(thread->handle, &ret); +#endif + return KS_STATUS_SUCCESS; +} + +KS_DECLARE(ks_status_t) ks_thread_create_ex(ks_thread_t **rthread, ks_thread_function_t func, void *data, + uint32_t flags, size_t stack_size, ks_thread_priority_t priority, ks_pool_t *pool) +{ + ks_thread_t *thread = NULL; + ks_status_t status = KS_STATUS_FAIL; + + if (!rthread) goto done; + + *rthread = NULL; + + if (!func || !pool) goto done; + + thread = (ks_thread_t *) ks_pool_alloc(pool, sizeof(ks_thread_t)); + + if (!thread) goto done; + + thread->private_data = data; + thread->function = func; + thread->stack_size = stack_size; + thread->running = 1; + thread->flags = flags; + thread->priority = priority; + thread->pool = pool; + +#if defined(WIN32) + thread->handle = (void *) _beginthreadex(NULL, (unsigned) thread->stack_size, (unsigned int (__stdcall *) (void *)) thread_launch, thread, 0, NULL); + + if (!thread->handle) { + goto fail; + } + + if (priority >= 99) { + SetThreadPriority(thread->handle, THREAD_PRIORITY_TIME_CRITICAL); + } else if (priority >= 50) { + SetThreadPriority(thread->handle, THREAD_PRIORITY_ABOVE_NORMAL); + } else if (priority >= 10) { + SetThreadPriority(thread->handle, THREAD_PRIORITY_NORMAL); + } else if (priority >= 1) { + SetThreadPriority(thread->handle, THREAD_PRIORITY_LOWEST); + } + + if (flags & KS_THREAD_FLAG_DETATCHED) { + CloseHandle(thread->handle); + } + + status = KS_STATUS_SUCCESS; + goto done; +#else + + if (pthread_attr_init(&thread->attribute) != 0) + goto fail; + + if ((flags & KS_THREAD_FLAG_DETATCHED) && pthread_attr_setdetachstate(&thread->attribute, PTHREAD_CREATE_DETACHED) != 0) + goto failpthread; + + if (thread->stack_size && pthread_attr_setstacksize(&thread->attribute, thread->stack_size) != 0) + goto failpthread; + + if (pthread_create(&thread->handle, &thread->attribute, thread_launch, thread) != 0) + goto failpthread; + + status = KS_STATUS_SUCCESS; + goto done; + + failpthread: + + pthread_attr_destroy(&thread->attribute); +#endif + + fail: + if (thread) { + thread->running = 0; + if (pool) { + ks_pool_safe_free(pool, thread); + } + } + done: + if (status == KS_STATUS_SUCCESS) { + *rthread = thread; + ks_pool_set_cleanup(pool, thread, NULL, 0, ks_thread_cleanup); + } + + return status; +} + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/ks_time.c b/libs/libks/src/ks_time.c new file mode 100644 index 0000000000..1805b61c08 --- /dev/null +++ b/libs/libks/src/ks_time.c @@ -0,0 +1,270 
@@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#ifdef WIN32 +static CRITICAL_SECTION timer_section; +static ks_time_t win32_tick_time_since_start = -1; +static DWORD win32_last_get_time_tick = 0; + +static uint8_t win32_use_qpc = 0; +static uint64_t win32_qpc_freq = 0; +static int timer_init; +static inline void win32_init_timers(void) +{ + OSVERSIONINFOEX version_info; /* Used to fetch current OS version from Windows */ + InitializeCriticalSection(&timer_section); + EnterCriticalSection(&timer_section); + + ZeroMemory(&version_info, sizeof(OSVERSIONINFOEX)); + version_info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); + + /* Check if we should use timeGetTime() (pre-Vista) or QueryPerformanceCounter() (Vista and later) */ + + if (GetVersionEx((OSVERSIONINFO*) &version_info)) { + if (version_info.dwPlatformId == VER_PLATFORM_WIN32_NT && version_info.dwMajorVersion >= 6) { + if (QueryPerformanceFrequency((LARGE_INTEGER*)&win32_qpc_freq) && win32_qpc_freq > 0) { + /* At least Vista, and QueryPerformanceFrequency() suceeded, enable qpc */ + win32_use_qpc = 1; + } else { + /* At least Vista, but QueryPerformanceFrequency() failed, disable qpc */ + win32_use_qpc = 0; + } + } else { + /* Older then Vista, disable qpc */ + win32_use_qpc = 0; + } + } else { + /* Unknown version - we want at least Vista, disable qpc */ + win32_use_qpc = 0; + } + + if (win32_use_qpc) { + uint64_t count = 0; + + if (!QueryPerformanceCounter((LARGE_INTEGER*)&count) || count == 0) { + /* Call to QueryPerformanceCounter() failed, disable qpc again */ + win32_use_qpc = 0; + } + } + + if (!win32_use_qpc) { + /* This will enable timeGetTime() instead, qpc init failed */ + win32_last_get_time_tick = timeGetTime(); + win32_tick_time_since_start = win32_last_get_time_tick; + } + + LeaveCriticalSection(&timer_section); + + timer_init = 1; +} + +KS_DECLARE(ks_time_t) ks_time_now(void) +{ + ks_time_t now; + + if (!timer_init) { + win32_init_timers(); + } + + if 
(win32_use_qpc) { + /* Use QueryPerformanceCounter */ + uint64_t count = 0; + QueryPerformanceCounter((LARGE_INTEGER*)&count); + now = ((count * 1000000) / win32_qpc_freq); + } else { + /* Use good old timeGetTime() */ + DWORD tick_now; + DWORD tick_diff; + + tick_now = timeGetTime(); + if (win32_tick_time_since_start != -1) { + EnterCriticalSection(&timer_section); + /* just add diff (to make it work more than 50 days). */ + tick_diff = tick_now - win32_last_get_time_tick; + win32_tick_time_since_start += tick_diff; + + win32_last_get_time_tick = tick_now; + now = (win32_tick_time_since_start * 1000); + LeaveCriticalSection(&timer_section); + } else { + /* If someone is calling us before timer is initialized, + * return the current tick + */ + now = (tick_now * 1000); + } + } + + return now; +} + +KS_DECLARE(ks_time_t) ks_time_now_sec(void) +{ + ks_time_t now; + + if (!timer_init) { + win32_init_timers(); + } + + if (win32_use_qpc) { + /* Use QueryPerformanceCounter */ + uint64_t count = 0; + QueryPerformanceCounter((LARGE_INTEGER*)&count); + now = (count / win32_qpc_freq); + } else { + /* Use good old timeGetTime() */ + DWORD tick_now; + DWORD tick_diff; + + tick_now = timeGetTime(); + if (win32_tick_time_since_start != -1) { + EnterCriticalSection(&timer_section); + /* just add diff (to make it work more than 50 days). */ + tick_diff = tick_now - win32_last_get_time_tick; + win32_tick_time_since_start += tick_diff; + + win32_last_get_time_tick = tick_now; + now = (win32_tick_time_since_start / 1000); + LeaveCriticalSection(&timer_section); + } else { + /* If someone is calling us before timer is initialized, + * return the current tick + */ + now = (tick_now / 1000); + } + } + + return now; +} + +KS_DECLARE(void) ks_sleep(ks_time_t microsec) +{ + + LARGE_INTEGER perfCnt, start, now; + + QueryPerformanceFrequency(&perfCnt); + QueryPerformanceCounter(&start); + + do { + QueryPerformanceCounter((LARGE_INTEGER*) &now); + } while ((now.QuadPart - start.QuadPart) / (float)(perfCnt.QuadPart) * 1000 * 1000 < (DWORD)microsec); + +} + +#else //!WINDOWS, UNIX ETC +KS_DECLARE(ks_time_t) ks_time_now(void) +{ + ks_time_t now; + +#if (defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_REALTIME)) + struct timespec ts; + clock_gettime(CLOCK_REALTIME, &ts); + now = (int64_t)ts.tv_sec * 1000000 + ((int64_t)ts.tv_nsec / 1000); +#else + struct timeval tv; + gettimeofday(&tv, NULL); + now = tv.tv_sec * 1000000 + tv.tv_usec; +#endif + + return now; +} + +KS_DECLARE(ks_time_t) ks_time_now_sec(void) +{ + ks_time_t now; + +#if (defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_REALTIME)) + struct timespec ts; + clock_gettime(CLOCK_REALTIME, &ts); + now = (int64_t)ts.tv_sec; +#else + struct timeval tv; + gettimeofday(&tv, NULL); + now = tv.tv_sec; +#endif + + return now; +} + +#if !defined(HAVE_CLOCK_NANOSLEEP) && !defined(__APPLE__) +static void generic_sleep(ks_time_t microsec) +{ +#ifdef HAVE_USLEEP + usleep(microsec); +#else + struct timeval tv; + tv.tv_usec = ks_time_usec(microsec); + tv.tv_sec = ks_time_sec(microsec); + select(0, NULL, NULL, NULL, &tv); +#endif +} +#endif + +KS_DECLARE(void) ks_sleep(ks_time_t microsec) +{ +#if defined(HAVE_CLOCK_NANOSLEEP) || defined(__APPLE__) + struct timespec ts; +#endif + +#if defined(HAVE_CLOCK_NANOSLEEP) + ts.tv_sec = ks_time_sec(microsec); + ts.tv_nsec = ks_time_nsec(microsec); + clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL); +#elif defined(__APPLE__) + ts.tv_sec = ks_time_sec(microsec); + ts.tv_nsec = ks_time_usec(microsec) * 850; + nanosleep(&ts, NULL); +#else + 
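+ /* Neither clock_nanosleep() nor an Apple target: fall back to the usleep()/select() based generic_sleep() defined above. */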
generic_sleep(microsec); +#endif + +#if defined(__APPLE__) + sched_yield(); +#endif + +} + +#endif + + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/src/kws.c b/libs/libks/src/kws.c new file mode 100644 index 0000000000..6127d274ef --- /dev/null +++ b/libs/libks/src/kws.c @@ -0,0 +1,1146 @@ +/* + * Copyright (c) 2007-2014, Anthony Minessale II + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of the original author; nor the names of any contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ks.h" + + +#ifdef _MSC_VER +/* warning C4706: assignment within conditional expression*/ +#pragma warning(disable: 4706) +#endif + +#define WS_BLOCK 1 +#define WS_NOBLOCK 0 + +#define SHA1_HASH_SIZE 20 + +static const char c64[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +//static ks_ssize_t ws_send_buf(kws_t *kws, kws_opcode_t oc); +//static ks_ssize_t ws_feed_buf(kws_t *kws, void *data, ks_size_t bytes); + + +struct kws_s { + ks_pool_t *pool; + ks_socket_t sock; + kws_type_t type; + char *buffer; + char *bbuffer; + char *body; + char *uri; + ks_size_t buflen; + ks_size_t bbuflen; + ks_ssize_t datalen; + ks_ssize_t wdatalen; + char *payload; + ks_ssize_t plen; + ks_ssize_t rplen; + ks_ssize_t packetlen; + SSL *ssl; + int handshake; + uint8_t down; + int secure; + uint8_t close_sock; + SSL_CTX *ssl_ctx; + int block; + int sanity; + int secure_established; + int logical_established; + int stay_open; + int x; + void *write_buffer; + ks_size_t write_buffer_len; + char *req_uri; + char *req_host; + char *req_proto; +}; + + + +static int cheezy_get_var(char *data, char *name, char *buf, ks_size_t buflen) +{ + char *p=data; + + /* the old way didnt make sure that variable values were used for the name hunt + * and didnt ensure that only a full match of the variable name was used + */ + + do { + if(!strncmp(p,name,strlen(name)) && *(p+strlen(name))==':') break; + } while((p = (strstr(p,"\n")+1))!=(char *)1); + + + if (p && p != (char *)1 && *p!='\0') { + char *v, *e = 0; + + v = strchr(p, ':'); + if (v) { + v++; + while(v && *v == ' ') { + v++; + } + if (v) { + e = strchr(v, '\r'); + if (!e) { + e = strchr(v, '\n'); + } + } + + if (v && e) { + int cplen; + ks_size_t len = e - v; + + if (len > buflen - 1) { + cplen = buflen -1; + } else { + cplen = len; + } + + strncpy(buf, v, cplen); + *(buf+cplen) = '\0'; + return 1; + } + + } + } + return 0; +} + +static int b64encode(unsigned char *in, ks_size_t ilen, unsigned char *out, ks_size_t olen) +{ + int y=0,bytes=0; + ks_size_t x=0; + unsigned int b=0,l=0; + + if(olen) { + } + + for(x=0;x= 6) { + out[bytes++] = c64[(b>>(l-=6))%64]; + if(++y!=72) { + continue; + } + //out[bytes++] = '\n'; + y=0; + } + } + + if (l > 0) { + out[bytes++] = c64[((b%16)<<(6-l))%64]; + } + if (l != 0) while (l < 6) { + out[bytes++] = '=', l += 2; + } + + return 0; +} + +static void sha1_digest(unsigned char *digest, char *in) +{ + SHA_CTX sha; + + SHA1_Init(&sha); + SHA1_Update(&sha, in, strlen(in)); + SHA1_Final(digest, &sha); + +} + +/* fix me when we get real rand funcs in ks */ +static void gen_nonce(unsigned char *buf, uint16_t len) +{ + int max = 255; + uint16_t x; + ks_time_t time_now = ks_time_now(); + srand((unsigned int)(((time_now >> 32) ^ time_now) & 0xffffffff)); + + for (x = 0; x < len; x++) { + int j = (int) (max * 1.0 * rand() / (RAND_MAX + 1.0)); + buf[x] = (char) j; + } +} + +static int verify_accept(kws_t *kws, const unsigned char *enonce, const char *accept) +{ + char input[256] = ""; + unsigned char output[SHA1_HASH_SIZE] = ""; + char b64[256] = ""; + + snprintf(input, sizeof(input), "%s%s", enonce, WEBSOCKET_GUID); + sha1_digest(output, input); + b64encode((unsigned char *)output, SHA1_HASH_SIZE, (unsigned char *)b64, sizeof(b64)); + + return !strcmp(b64, accept); +} + +static int ws_client_handshake(kws_t *kws) +{ + unsigned char nonce[16]; + unsigned char enonce[128] = ""; + char req[256] = ""; + + gen_nonce(nonce, sizeof(nonce)); + b64encode(nonce, sizeof(nonce), enonce, sizeof(enonce)); + + 
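+ /* Build the RFC 6455 upgrade request: Sec-WebSocket-Key carries the base64 of the 16 random bytes generated above, and the server must answer with the matching Sec-WebSocket-Accept digest (checked via verify_accept() once the response arrives). */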
ks_snprintf(req, sizeof(req), + "GET %s HTTP/1.1\r\n" + "Host: %s\n" + "Upgrade: websocket\r\n" + "Connection: Upgrade\r\n" + "Sec-WebSocket-Key: %s\r\n" + "Sec-WebSocket-Protocol: %s\r\n" + "Sec-WebSocket-Version: 13\r\n" + "\r\n", + kws->req_uri, kws->req_host, enonce, kws->req_proto); + + kws_raw_write(kws, req, strlen(req)); + + ks_ssize_t bytes; + + do { + bytes = kws_raw_read(kws, kws->buffer + kws->datalen, kws->buflen - kws->datalen, WS_BLOCK); + } while (bytes > 0 && !strstr((char *)kws->buffer, "\r\n\r\n")); + + char accept[128] = ""; + + cheezy_get_var(kws->buffer, "Sec-WebSocket-Accept", accept, sizeof(accept)); + + if (zstr_buf(accept) || !verify_accept(kws, enonce, (char *)accept)) { + return -1; + } + + kws->handshake = 1; + + return 0; +} + +static int ws_server_handshake(kws_t *kws) +{ + char key[256] = ""; + char version[5] = ""; + char proto[256] = ""; + char proto_buf[384] = ""; + char input[256] = ""; + unsigned char output[SHA1_HASH_SIZE] = ""; + char b64[256] = ""; + char respond[512] = ""; + ks_ssize_t bytes; + char *p, *e = 0; + + if (kws->sock == KS_SOCK_INVALID) { + return -3; + } + + while((bytes = kws_raw_read(kws, kws->buffer + kws->datalen, kws->buflen - kws->datalen, WS_BLOCK)) > 0) { + kws->datalen += bytes; + if (strstr(kws->buffer, "\r\n\r\n") || strstr(kws->buffer, "\n\n")) { + break; + } + } + + if (bytes > kws->buflen -1) { + goto err; + } + + *(kws->buffer + kws->datalen) = '\0'; + + if (strncasecmp(kws->buffer, "GET ", 4)) { + goto err; + } + + p = kws->buffer + 4; + + e = strchr(p, ' '); + if (!e) { + goto err; + } + + kws->uri = ks_pool_alloc(kws->pool, (e-p) + 1); + strncpy(kws->uri, p, e-p); + *(kws->uri + (e-p)) = '\0'; + + cheezy_get_var(kws->buffer, "Sec-WebSocket-Key", key, sizeof(key)); + cheezy_get_var(kws->buffer, "Sec-WebSocket-Version", version, sizeof(version)); + cheezy_get_var(kws->buffer, "Sec-WebSocket-Protocol", proto, sizeof(proto)); + + if (!*key) { + goto err; + } + + snprintf(input, sizeof(input), "%s%s", key, WEBSOCKET_GUID); + sha1_digest(output, input); + b64encode((unsigned char *)output, SHA1_HASH_SIZE, (unsigned char *)b64, sizeof(b64)); + + if (*proto) { + snprintf(proto_buf, sizeof(proto_buf), "Sec-WebSocket-Protocol: %s\r\n", proto); + } + + snprintf(respond, sizeof(respond), + "HTTP/1.1 101 Switching Protocols\r\n" + "Upgrade: websocket\r\n" + "Connection: Upgrade\r\n" + "Sec-WebSocket-Accept: %s\r\n" + "%s\r\n", + b64, + proto_buf); + respond[511] = 0; + + if (kws_raw_write(kws, respond, strlen(respond)) != (ks_ssize_t)strlen(respond)) { + goto err; + } + + kws->handshake = 1; + + return 0; + + err: + + if (!kws->stay_open) { + + snprintf(respond, sizeof(respond), "HTTP/1.1 400 Bad Request\r\n" + "Sec-WebSocket-Version: 13\r\n\r\n"); + respond[511] = 0; + + kws_raw_write(kws, respond, strlen(respond)); + + kws_close(kws, WS_NONE); + } + + return -1; + +} + +KS_DECLARE(ks_ssize_t) kws_raw_read(kws_t *kws, void *data, ks_size_t bytes, int block) +{ + ks_ssize_t r; + int err = 0; + + kws->x++; + if (kws->x > 250) ks_sleep_ms(1); + + if (kws->ssl) { + do { + r = SSL_read(kws->ssl, data, bytes); + + if (r == -1) { + err = SSL_get_error(kws->ssl, r); + + if (err == SSL_ERROR_WANT_READ) { + if (!block) { + r = -2; + goto end; + } + kws->x++; + ks_sleep_ms(10); + } else { + r = -1; + goto end; + } + } + + } while (r == -1 && err == SSL_ERROR_WANT_READ && kws->x < 1000); + + goto end; + } + + do { + + r = recv(kws->sock, data, bytes, 0); + + if (r == -1) { + if (!block && ks_errno_is_blocking(ks_errno())) { + r = -2; + goto 
end; + } + + if (block) { + kws->x++; + ks_sleep_ms(10); + } + } + } while (r == -1 && ks_errno_is_blocking(ks_errno()) && kws->x < 1000); + + end: + + if (kws->x >= 10000 || (block && kws->x >= 1000)) { + r = -1; + } + + if (r > 0) { + *((char *)data + r) = '\0'; + } + + if (r >= 0) { + kws->x = 0; + } + + return r; +} + +KS_DECLARE(ks_ssize_t) kws_raw_write(kws_t *kws, void *data, ks_size_t bytes) +{ + ks_ssize_t r; + int sanity = 2000; + int ssl_err = 0; + ks_size_t wrote = 0; + + if (kws->ssl) { + do { + r = SSL_write(kws->ssl, (void *)((unsigned char *)data + wrote), bytes - wrote); + + if (r > 0) { + wrote += r; + } + + if (sanity < 2000) { + ks_sleep_ms(1); + } + + if (r == -1) { + ssl_err = SSL_get_error(kws->ssl, r); + } + + } while (--sanity > 0 && ((r == -1 && ssl_err == SSL_ERROR_WANT_WRITE) || (kws->block && wrote < bytes))); + + if (ssl_err) { + r = ssl_err * -1; + } + + return r; + } + + do { + r = send(kws->sock, (void *)((unsigned char *)data + wrote), bytes - wrote, 0); + + if (r > 0) { + wrote += r; + } + + if (sanity < 2000) { + ks_sleep_ms(1); + } + + } while (--sanity > 0 && ((r == -1 && ks_errno_is_blocking(ks_errno())) || (kws->block && wrote < bytes))); + + //if (r<0) { + //printf("wRITE FAIL: %s\n", strerror(errno)); + //} + + return r; +} + +static void setup_socket(ks_socket_t sock) +{ + ks_socket_option(sock, KS_SO_NONBLOCK, KS_TRUE); +} + +static void restore_socket(ks_socket_t sock) +{ + ks_socket_option(sock, KS_SO_NONBLOCK, KS_FALSE); +} + +static int establish_client_logical_layer(kws_t *kws) +{ + + if (!kws->sanity) { + return -1; + } + + if (kws->logical_established) { + return 0; + } + + if (kws->secure && !kws->secure_established) { + int code; + + if (!kws->ssl) { + kws->ssl = SSL_new(kws->ssl_ctx); + assert(kws->ssl); + + SSL_set_fd(kws->ssl, kws->sock); + } + + do { + code = SSL_connect(kws->ssl); + + if (code == 1) { + kws->secure_established = 1; + break; + } + + if (code == 0) { + return -1; + } + + if (code < 0) { + if (code == -1 && SSL_get_error(kws->ssl, code) != SSL_ERROR_WANT_READ) { + return -1; + } + } + + if (kws->block) { + ks_sleep_ms(10); + } else { + ks_sleep_ms(1); + } + + kws->sanity--; + + if (!kws->block) { + return -2; + } + + } while (kws->sanity > 0); + + if (!kws->sanity) { + return -1; + } + } + + while (!kws->down && !kws->handshake) { + int r = ws_client_handshake(kws); + + if (r < 0) { + kws->down = 1; + return -1; + } + + if (!kws->handshake && !kws->block) { + return -2; + } + + } + + kws->logical_established = 1; + + return 0; +} + +static int establish_server_logical_layer(kws_t *kws) +{ + + if (!kws->sanity) { + return -1; + } + + if (kws->logical_established) { + return 0; + } + + if (kws->secure && !kws->secure_established) { + int code; + + if (!kws->ssl) { + kws->ssl = SSL_new(kws->ssl_ctx); + assert(kws->ssl); + + SSL_set_fd(kws->ssl, kws->sock); + } + + do { + code = SSL_accept(kws->ssl); + + if (code == 1) { + kws->secure_established = 1; + break; + } + + if (code == 0) { + return -1; + } + + if (code < 0) { + if (code == -1 && SSL_get_error(kws->ssl, code) != SSL_ERROR_WANT_READ) { + return -1; + } + } + + if (kws->block) { + ks_sleep_ms(10); + } else { + ks_sleep_ms(1); + } + + kws->sanity--; + + if (!kws->block) { + return -2; + } + + } while (kws->sanity > 0); + + if (!kws->sanity) { + return -1; + } + + } + + while (!kws->down && !kws->handshake) { + int r = ws_server_handshake(kws); + + if (r < 0) { + kws->down = 1; + return -1; + } + + if (!kws->handshake && !kws->block) { + return -2; + } + + } + + 
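+ /* TLS accept (when configured) and the HTTP upgrade handshake are both complete; mark the logical layer as established. */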
kws->logical_established = 1; + + return 0; +} + +static int establish_logical_layer(kws_t *kws) +{ + if (kws->type == KWS_CLIENT) { + return establish_client_logical_layer(kws); + } else { + return establish_server_logical_layer(kws); + } +} + + +KS_DECLARE(ks_status_t) kws_init(kws_t **kwsP, ks_socket_t sock, SSL_CTX *ssl_ctx, const char *client_data, kws_flag_t flags, ks_pool_t *pool) +{ + kws_t *kws; + + kws = ks_pool_alloc(pool, sizeof(*kws)); + kws->pool = pool; + + if ((flags & KWS_CLOSE_SOCK)) { + kws->close_sock = 1; + } + + if ((flags & KWS_STAY_OPEN)) { + kws->stay_open = 1; + } + + if ((flags & KWS_BLOCK)) { + kws->block = 1; + } + + if (client_data) { + char *p = NULL; + kws->req_uri = ks_pstrdup(kws->pool, client_data); + + if ((p = strchr(kws->req_uri, ':'))) { + *p++ = '\0'; + kws->req_host = p; + if ((p = strchr(kws->req_host, ':'))) { + *p++ = '\0'; + kws->req_proto = p; + } + } + + kws->type = KWS_CLIENT; + } else { + kws->type = KWS_SERVER; + } + + kws->sock = sock; + kws->sanity = 5000; + kws->ssl_ctx = ssl_ctx; + + kws->buflen = 1024 * 64; + kws->bbuflen = kws->buflen; + + kws->buffer = ks_pool_alloc(kws->pool, kws->buflen); + kws->bbuffer = ks_pool_alloc(kws->pool, kws->bbuflen); + //printf("init %p %ld\n", (void *) kws->bbuffer, kws->bbuflen); + //memset(kws->buffer, 0, kws->buflen); + //memset(kws->bbuffer, 0, kws->bbuflen); + + kws->secure = ssl_ctx ? 1 : 0; + + setup_socket(sock); + + if (establish_logical_layer(kws) == -1) { + goto err; + } + + if (kws->down) { + goto err; + } + + *kwsP = kws; + + return KS_STATUS_SUCCESS; + + err: + + kws_destroy(&kws); + + return KS_STATUS_FAIL; +} + +KS_DECLARE(void) kws_destroy(kws_t **kwsP) +{ + kws_t *kws; + ks_assert(kwsP); + + if (!(kws = *kwsP)) { + return; + } + + *kwsP = NULL; + + if (!kws->down) { + kws_close(kws, WS_NONE); + } + + if (kws->down > 1) { + return; + } + + kws->down = 2; + + if (kws->write_buffer) { + ks_pool_free(kws->pool, kws->write_buffer); + kws->write_buffer = NULL; + kws->write_buffer_len = 0; + } + + if (kws->ssl) { + int code; + do { + code = SSL_shutdown(kws->ssl); + } while (code == -1 && SSL_get_error(kws->ssl, code) == SSL_ERROR_WANT_READ); + + SSL_free(kws->ssl); + kws->ssl = NULL; + } + + if (kws->buffer) ks_pool_free(kws->pool, kws->buffer); + if (kws->bbuffer) ks_pool_free(kws->pool, kws->bbuffer); + + kws->buffer = kws->bbuffer = NULL; + + ks_pool_free(kws->pool, kws); + kws = NULL; +} + +KS_DECLARE(ks_ssize_t) kws_close(kws_t *kws, int16_t reason) +{ + + if (kws->down) { + return -1; + } + + kws->down = 1; + + if (kws->uri) { + ks_pool_free(kws->pool, kws->uri); + kws->uri = NULL; + } + + if (reason && kws->sock != KS_SOCK_INVALID) { + uint16_t *u16; + uint8_t fr[4] = {WSOC_CLOSE | 0x80, 2, 0}; + + u16 = (uint16_t *) &fr[2]; + *u16 = htons((int16_t)reason); + kws_raw_write(kws, fr, 4); + } + + restore_socket(kws->sock); + + if (kws->close_sock && kws->sock != KS_SOCK_INVALID) { +#ifndef WIN32 + close(kws->sock); +#else + closesocket(kws->sock); +#endif + } + + kws->sock = KS_SOCK_INVALID; + + return reason * -1; + +} + +#ifndef WIN32 +#if defined(HAVE_BYTESWAP_H) +#include +#elif defined(HAVE_SYS_ENDIAN_H) +#include +#elif defined (__APPLE__) +#include +#define bswap_16 OSSwapInt16 +#define bswap_32 OSSwapInt32 +#define bswap_64 OSSwapInt64 +#elif defined (__UCLIBC__) +#else +#define bswap_16(value) ((((value) & 0xff) << 8) | ((value) >> 8)) +#define bswap_32(value) (((uint32_t)bswap_16((uint16_t)((value) & 0xffff)) << 16) | (uint32_t)bswap_16((uint16_t)((value) >> 16))) +#define 
bswap_64(value) (((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) << 32) | (uint64_t)bswap_32((uint32_t)((value) >> 32))) +#endif +#endif + +uint64_t hton64(uint64_t val) +{ +#if __BYTE_ORDER == __BIG_ENDIAN + return (val); +#else + return bswap_64(val); +#endif +} + +uint64_t ntoh64(uint64_t val) +{ +#if __BYTE_ORDER == __BIG_ENDIAN + return (val); +#else + return bswap_64(val); +#endif +} + + +KS_DECLARE(ks_ssize_t) kws_read_frame(kws_t *kws, kws_opcode_t *oc, uint8_t **data) +{ + + ks_ssize_t need = 2; + char *maskp; + int ll = 0; + int frag = 0; + int blen; + + kws->body = kws->bbuffer; + kws->packetlen = 0; + + again: + need = 2; + maskp = NULL; + *data = NULL; + + ll = establish_logical_layer(kws); + + if (ll < 0) { + return ll; + } + + if (kws->down) { + return -1; + } + + if (!kws->handshake) { + return kws_close(kws, WS_PROTO_ERR); + } + + if ((kws->datalen = kws_raw_read(kws, kws->buffer, 9, kws->block)) < 0) { + if (kws->datalen == -2) { + return -2; + } + return kws_close(kws, WS_PROTO_ERR); + } + + if (kws->datalen < need) { + if ((kws->datalen += kws_raw_read(kws, kws->buffer + kws->datalen, 9 - kws->datalen, WS_BLOCK)) < need) { + /* too small - protocol err */ + return kws_close(kws, WS_PROTO_ERR); + } + } + + *oc = *kws->buffer & 0xf; + + switch(*oc) { + case WSOC_CLOSE: + { + kws->plen = kws->buffer[1] & 0x7f; + *data = (uint8_t *) &kws->buffer[2]; + return kws_close(kws, 1000); + } + break; + case WSOC_CONTINUATION: + case WSOC_TEXT: + case WSOC_BINARY: + case WSOC_PING: + case WSOC_PONG: + { + int fin = (kws->buffer[0] >> 7) & 1; + int mask = (kws->buffer[1] >> 7) & 1; + + + if (!fin && *oc != WSOC_CONTINUATION) { + frag = 1; + } else if (fin && *oc == WSOC_CONTINUATION) { + frag = 0; + } + + if (mask) { + need += 4; + + if (need > kws->datalen) { + /* too small - protocol err */ + *oc = WSOC_CLOSE; + return kws_close(kws, WS_PROTO_ERR); + } + } + + kws->plen = kws->buffer[1] & 0x7f; + kws->payload = &kws->buffer[2]; + + if (kws->plen == 127) { + uint64_t *u64; + int more = 0; + + need += 8; + + if (need > kws->datalen) { + /* too small - protocol err */ + //*oc = WSOC_CLOSE; + //return kws_close(kws, WS_PROTO_ERR); + + more = kws_raw_read(kws, kws->buffer + kws->datalen, need - kws->datalen, WS_BLOCK); + + if (more < need - kws->datalen) { + *oc = WSOC_CLOSE; + return kws_close(kws, WS_PROTO_ERR); + } else { + kws->datalen += more; + } + + + } + + u64 = (uint64_t *) kws->payload; + kws->payload += 8; + kws->plen = ntoh64(*u64); + } else if (kws->plen == 126) { + uint16_t *u16; + + need += 2; + + if (need > kws->datalen) { + /* too small - protocol err */ + *oc = WSOC_CLOSE; + return kws_close(kws, WS_PROTO_ERR); + } + + u16 = (uint16_t *) kws->payload; + kws->payload += 2; + kws->plen = ntohs(*u16); + } + + if (mask) { + maskp = (char *)kws->payload; + kws->payload += 4; + } + + need = (kws->plen - (kws->datalen - need)); + + if (need < 0) { + /* invalid read - protocol err .. 
*/ + *oc = WSOC_CLOSE; + return kws_close(kws, WS_PROTO_ERR); + } + + blen = kws->body - kws->bbuffer; + + if (need + blen > (ks_ssize_t)kws->bbuflen) { + void *tmp; + + kws->bbuflen = need + blen + kws->rplen; + + if ((tmp = ks_pool_resize(kws->pool, kws->bbuffer, kws->bbuflen))) { + kws->bbuffer = tmp; + } else { + abort(); + } + + kws->body = kws->bbuffer + blen; + } + + kws->rplen = kws->plen - need; + + if (kws->rplen) { + memcpy(kws->body, kws->payload, kws->rplen); + } + + while(need) { + ks_ssize_t r = kws_raw_read(kws, kws->body + kws->rplen, need, WS_BLOCK); + + if (r < 1) { + /* invalid read - protocol err .. */ + *oc = WSOC_CLOSE; + return kws_close(kws, WS_PROTO_ERR); + } + + kws->datalen += r; + kws->rplen += r; + need -= r; + } + + if (mask && maskp) { + ks_ssize_t i; + + for (i = 0; i < kws->datalen; i++) { + kws->body[i] ^= maskp[i % 4]; + } + } + + + if (*oc == WSOC_PING) { + kws_write_frame(kws, WSOC_PONG, kws->body, kws->rplen); + goto again; + } + + *(kws->body+kws->rplen) = '\0'; + kws->packetlen += kws->rplen; + kws->body += kws->rplen; + + if (frag) { + goto again; + } + + *data = (uint8_t *)kws->bbuffer; + + //printf("READ[%ld][%d]-----------------------------:\n[%s]\n-------------------------------\n", kws->packetlen, *oc, (char *)*data); + + + return kws->packetlen; + } + break; + default: + { + /* invalid op code - protocol err .. */ + *oc = WSOC_CLOSE; + return kws_close(kws, WS_PROTO_ERR); + } + break; + } +} + +#if 0 +static ks_ssize_t ws_feed_buf(kws_t *kws, void *data, ks_size_t bytes) +{ + + if (bytes + kws->wdatalen > kws->buflen) { + return -1; + } + + memcpy(kws->wbuffer + kws->wdatalen, data, bytes); + + kws->wdatalen += bytes; + + return bytes; +} + +static ks_ssize_t ws_send_buf(kws_t *kws, kws_opcode_t oc) +{ + ks_ssize_t r = 0; + + if (!kws->wdatalen) { + return -1; + } + + r = ws_write_frame(kws, oc, kws->wbuffer, kws->wdatalen); + + kws->wdatalen = 0; + + return r; +} +#endif + +KS_DECLARE(ks_ssize_t) kws_write_frame(kws_t *kws, kws_opcode_t oc, void *data, ks_size_t bytes) +{ + uint8_t hdr[14] = { 0 }; + ks_size_t hlen = 2; + uint8_t *bp; + ks_ssize_t raw_ret = 0; + + if (kws->down) { + return -1; + } + + //printf("WRITE[%ld]-----------------------------:\n[%s]\n-----------------------------------\n", bytes, (char *) data); + + hdr[0] = (uint8_t)(oc | 0x80); + + if (bytes < 126) { + hdr[1] = (uint8_t)bytes; + } else if (bytes < 0x10000) { + uint16_t *u16; + + hdr[1] = 126; + hlen += 2; + + u16 = (uint16_t *) &hdr[2]; + *u16 = htons((uint16_t) bytes); + + } else { + uint64_t *u64; + + hdr[1] = 127; + hlen += 8; + + u64 = (uint64_t *) &hdr[2]; + *u64 = hton64(bytes); + } + + if (kws->write_buffer_len < (hlen + bytes + 1)) { + void *tmp; + + kws->write_buffer_len = hlen + bytes + 1; + if ((tmp = ks_pool_resize(kws->pool, kws->write_buffer, kws->write_buffer_len))) { + kws->write_buffer = tmp; + } else { + abort(); + } + } + + bp = (uint8_t *) kws->write_buffer; + memcpy(bp, (void *) &hdr[0], hlen); + memcpy(bp + hlen, data, bytes); + + raw_ret = kws_raw_write(kws, bp, (hlen + bytes)); + + if (raw_ret != (ks_ssize_t) (hlen + bytes)) { + return raw_ret; + } + + return bytes; +} + +KS_DECLARE(ks_status_t) kws_get_buffer(kws_t *kws, char **bufP, ks_size_t *buflen) +{ + *bufP = kws->buffer; + *buflen = kws->datalen; + + return KS_STATUS_SUCCESS; +} diff --git a/libs/libks/src/simclist.c b/libs/libks/src/simclist.c index 8333ab86d9..e132022260 100755 --- a/libs/libks/src/simclist.c +++ b/libs/libks/src/simclist.c @@ -23,36 +23,38 @@ #include #include 
-#include /* for setting errno */ +#include /* for setting errno */ #include #ifndef _WIN32 /* not in Windows! */ -# include -# include +#include +#include #else #include #endif #ifndef SIMCLIST_NO_DUMPRESTORE /* includes for dump/restore */ -# include -# include /* for READ_ERRCHECK() and write() */ -# include /* for open() etc */ -# ifndef _WIN32 -# include /* for htons() on UNIX */ -# else -# include /* for htons() on Windows */ -# endif +#include +#include /* for READ_ERRCHECK() and write() */ +#include /* for open() etc */ +#ifndef _WIN32 +#include /* for htons() on UNIX */ +#else +#include /* for htons() on Windows */ +#endif #endif /* disable asserts */ #ifndef SIMCLIST_DEBUG +#ifndef NDEBUG #define NDEBUG #endif +#endif #include -#include /* for open()'s access modes S_IRUSR etc */ +#include /* for open()'s access modes S_IRUSR etc */ #include #if defined(_MSC_VER) || defined(__MINGW32__) @@ -60,33 +62,34 @@ #ifdef _MSC_VER #pragma comment(lib, "Winmm.lib") #endif -int gettimeofday(struct timeval *tp, void *tzp) { - DWORD t; +int gettimeofday(struct timeval *tp, void *tzp) +{ + DWORD t; - /* XSI says: "If tzp is not a null pointer, the behavior is unspecified" */ - assert(tzp == NULL); + /* XSI says: "If tzp is not a null pointer, the behavior is unspecified" */ + assert(tzp == NULL); - t = timeGetTime(); - tp->tv_sec = t / 1000; - tp->tv_usec = t % 1000; - return 0; + t = timeGetTime(); + tp->tv_sec = t / 1000; + tp->tv_usec = t % 1000; + return 0; } #endif /* work around lack of inttypes.h support in broken Microsoft Visual Studio compilers */ #if !defined(_WIN32) || !defined(_MSC_VER) -# include /* (u)int*_t */ +#include /* (u)int*_t */ #else -# include -typedef UINT8 uint8_t; -typedef UINT16 uint16_t; +#include +typedef UINT8 uint8_t; +typedef UINT16 uint16_t; typedef ULONG32 uint32_t; -typedef UINT64 uint64_t; -typedef INT8 int8_t; -typedef INT16 int16_t; -typedef LONG32 int32_t; -typedef INT64 int64_t; +typedef UINT64 uint64_t; +typedef INT8 int8_t; +typedef INT16 int16_t; +typedef LONG32 int32_t; +typedef INT64 int64_t; #endif @@ -176,21 +179,21 @@ typedef INT64 int64_t; /* list dump declarations */ -#define SIMCLIST_DUMPFORMAT_VERSION 1 /* (short integer) version of fileformat managed by _dump* and _restore* functions */ +#define SIMCLIST_DUMPFORMAT_VERSION 1 /* (short integer) version of fileformat managed by _dump* and _restore* functions */ -#define SIMCLIST_DUMPFORMAT_HEADERLEN 30 /* length of the header */ +#define SIMCLIST_DUMPFORMAT_HEADERLEN 30 /* length of the header */ /* header for a list dump */ struct list_dump_header_s { - uint16_t ver; /* version */ - int32_t timestamp_sec; /* dump timestamp, seconds since UNIX Epoch */ - int32_t timestamp_usec; /* dump timestamp, microseconds since timestamp_sec */ - int32_t rndterm; /* random value terminator -- terminates the data sequence */ + uint16_t ver; /* version */ + int32_t timestamp_sec; /* dump timestamp, seconds since UNIX Epoch */ + int32_t timestamp_usec; /* dump timestamp, microseconds since timestamp_sec */ + int32_t rndterm; /* random value terminator -- terminates the data sequence */ - uint32_t totlistlen; /* sum of every element' size, bytes */ - uint32_t numels; /* number of elements */ - uint32_t elemlen; /* bytes length of an element, for constant-size lists, <= 0 otherwise */ - int32_t listhash; /* hash of the list at the time of dumping, or 0 if to be ignored */ + uint32_t totlistlen; /* sum of every element' size, bytes */ + uint32_t numels; /* number of elements */ + uint32_t elemlen; /* bytes 
length of an element, for constant-size lists, <= 0 otherwise */ + int32_t listhash; /* hash of the list at the time of dumping, or 0 if to be ignored */ }; @@ -210,13 +213,10 @@ static int list_attrOk(const list_t *restrict l); #endif /* do not inline, this is recursive */ -static void list_sort_quicksort(list_t *restrict l, int versus, - unsigned int first, struct list_entry_s *fel, - unsigned int last, struct list_entry_s *lel); +static void list_sort_quicksort(list_t *restrict l, int versus, unsigned int first, struct list_entry_s *fel, unsigned int last, struct list_entry_s *lel); static inline void list_sort_selectionsort(list_t *restrict l, int versus, - unsigned int first, struct list_entry_s *fel, - unsigned int last, struct list_entry_s *lel); + unsigned int first, struct list_entry_s *fel, unsigned int last, struct list_entry_s *lel); static void *list_get_minmax(const list_t *restrict l, int versus); @@ -248,287 +248,321 @@ static inline struct list_entry_s *list_findpos(const list_t *restrict l, int po static unsigned random_seed = 0; /* use local RNG */ -static inline void seed_random(void) { - if (random_seed == 0) - random_seed = (unsigned)getpid() ^ (unsigned)time(NULL); +static inline void seed_random(void) +{ + if (random_seed == 0) + random_seed = (unsigned) getpid() ^ (unsigned) time(NULL); } -static inline long get_random(void) { - random_seed = (1664525 * random_seed + 1013904223); - return random_seed; +static inline long get_random(void) +{ + random_seed = (1664525 * random_seed + 1013904223); + return random_seed; } #else /* use OS's random generator */ -# define seed_random() -# define get_random() (rand()) +#define seed_random() +#define get_random() (rand()) #endif /* list initialization */ -int list_init(list_t *restrict l) { - if (l == NULL) return -1; +int list_init(list_t *restrict l) +{ + if (l == NULL) + return -1; - seed_random(); + seed_random(); - l->numels = 0; + l->numels = 0; - /* head/tail sentinels and mid pointer */ - l->head_sentinel = (struct list_entry_s *)malloc(sizeof(struct list_entry_s)); - l->tail_sentinel = (struct list_entry_s *)malloc(sizeof(struct list_entry_s)); - l->head_sentinel->next = l->tail_sentinel; - l->tail_sentinel->prev = l->head_sentinel; - l->head_sentinel->prev = l->tail_sentinel->next = l->mid = NULL; - l->head_sentinel->data = l->tail_sentinel->data = NULL; + /* head/tail sentinels and mid pointer */ + l->head_sentinel = (struct list_entry_s *) malloc(sizeof(struct list_entry_s)); + l->tail_sentinel = (struct list_entry_s *) malloc(sizeof(struct list_entry_s)); + l->head_sentinel->next = l->tail_sentinel; + l->tail_sentinel->prev = l->head_sentinel; + l->head_sentinel->prev = l->tail_sentinel->next = l->mid = NULL; + l->head_sentinel->data = l->tail_sentinel->data = NULL; - /* iteration attributes */ - l->iter_active = 0; - l->iter_pos = 0; - l->iter_curentry = NULL; + /* iteration attributes */ + l->iter_active = 0; + l->iter_pos = 0; + l->iter_curentry = NULL; - /* free-list attributes */ - l->spareels = (struct list_entry_s **)malloc(SIMCLIST_MAX_SPARE_ELEMS * sizeof(struct list_entry_s *)); - l->spareelsnum = 0; + /* free-list attributes */ + l->spareels = (struct list_entry_s **) malloc(SIMCLIST_MAX_SPARE_ELEMS * sizeof(struct list_entry_s *)); + l->spareelsnum = 0; #ifdef SIMCLIST_WITH_THREADS - l->threadcount = 0; + l->threadcount = 0; #endif - list_attributes_setdefaults(l); + list_attributes_setdefaults(l); - assert(list_repOk(l)); - assert(list_attrOk(l)); + assert(list_repOk(l)); + assert(list_attrOk(l)); - 
return 0; + return 0; } -void list_destroy(list_t *restrict l) { - unsigned int i; +void list_destroy(list_t *restrict l) +{ + unsigned int i; - list_clear(l); - for (i = 0; i < l->spareelsnum; i++) { - free(l->spareels[i]); - } - free(l->spareels); - free(l->head_sentinel); - free(l->tail_sentinel); + list_clear(l); + for (i = 0; i < l->spareelsnum; i++) { + free(l->spareels[i]); + } + free(l->spareels); + free(l->head_sentinel); + free(l->tail_sentinel); } -int list_attributes_setdefaults(list_t *restrict l) { - l->attrs.comparator = NULL; - l->attrs.seeker = NULL; +int list_attributes_setdefaults(list_t *restrict l) +{ + l->attrs.comparator = NULL; + l->attrs.seeker = NULL; - /* also free() element data when removing and element from the list */ - l->attrs.meter = NULL; - l->attrs.copy_data = 0; + /* also free() element data when removing and element from the list */ + l->attrs.meter = NULL; + l->attrs.copy_data = 0; - l->attrs.hasher = NULL; + l->attrs.hasher = NULL; - /* serializer/unserializer */ - l->attrs.serializer = NULL; - l->attrs.unserializer = NULL; + /* serializer/unserializer */ + l->attrs.serializer = NULL; + l->attrs.unserializer = NULL; - assert(list_attrOk(l)); + assert(list_attrOk(l)); - return 0; + return 0; } /* setting list properties */ -int list_attributes_comparator(list_t *restrict l, element_comparator comparator_fun) { - if (l == NULL) return -1; +int list_attributes_comparator(list_t *restrict l, element_comparator comparator_fun) +{ + if (l == NULL) + return -1; - l->attrs.comparator = comparator_fun; + l->attrs.comparator = comparator_fun; - assert(list_attrOk(l)); + assert(list_attrOk(l)); - return 0; + return 0; } -int list_attributes_seeker(list_t *restrict l, element_seeker seeker_fun) { - if (l == NULL) return -1; +int list_attributes_seeker(list_t *restrict l, element_seeker seeker_fun) +{ + if (l == NULL) + return -1; - l->attrs.seeker = seeker_fun; - assert(list_attrOk(l)); + l->attrs.seeker = seeker_fun; + assert(list_attrOk(l)); - return 0; + return 0; } -int list_attributes_copy(list_t *restrict l, element_meter metric_fun, int copy_data) { - if (l == NULL || (metric_fun == NULL && copy_data != 0)) return -1; +int list_attributes_copy(list_t *restrict l, element_meter metric_fun, int copy_data) +{ + if (l == NULL || (metric_fun == NULL && copy_data != 0)) + return -1; - l->attrs.meter = metric_fun; - l->attrs.copy_data = copy_data; + l->attrs.meter = metric_fun; + l->attrs.copy_data = copy_data; - assert(list_attrOk(l)); + assert(list_attrOk(l)); - return 0; + return 0; } -int list_attributes_hash_computer(list_t *restrict l, element_hash_computer hash_computer_fun) { - if (l == NULL) return -1; +int list_attributes_hash_computer(list_t *restrict l, element_hash_computer hash_computer_fun) +{ + if (l == NULL) + return -1; - l->attrs.hasher = hash_computer_fun; - assert(list_attrOk(l)); - return 0; + l->attrs.hasher = hash_computer_fun; + assert(list_attrOk(l)); + return 0; } -int list_attributes_serializer(list_t *restrict l, element_serializer serializer_fun) { - if (l == NULL) return -1; +int list_attributes_serializer(list_t *restrict l, element_serializer serializer_fun) +{ + if (l == NULL) + return -1; - l->attrs.serializer = serializer_fun; - assert(list_attrOk(l)); - return 0; + l->attrs.serializer = serializer_fun; + assert(list_attrOk(l)); + return 0; } -int list_attributes_unserializer(list_t *restrict l, element_unserializer unserializer_fun) { - if (l == NULL) return -1; +int list_attributes_unserializer(list_t *restrict l, 
element_unserializer unserializer_fun) +{ + if (l == NULL) + return -1; - l->attrs.unserializer = unserializer_fun; - assert(list_attrOk(l)); - return 0; + l->attrs.unserializer = unserializer_fun; + assert(list_attrOk(l)); + return 0; } -int list_append(list_t *restrict l, const void *data) { - return list_insert_at(l, data, l->numels); +int list_append(list_t *restrict l, const void *data) +{ + return list_insert_at(l, data, l->numels); } -int list_prepend(list_t *restrict l, const void *data) { - return list_insert_at(l, data, 0); +int list_prepend(list_t *restrict l, const void *data) +{ + return list_insert_at(l, data, 0); } -void *list_fetch(list_t *restrict l) { - return list_extract_at(l, 0); +void *list_fetch(list_t *restrict l) +{ + return list_extract_at(l, 0); } -void *list_get_at(const list_t *restrict l, unsigned int pos) { - struct list_entry_s *tmp; +void *list_get_at(const list_t *restrict l, unsigned int pos) +{ + struct list_entry_s *tmp; - tmp = list_findpos(l, pos); + tmp = list_findpos(l, pos); - return (tmp != NULL ? tmp->data : NULL); + return (tmp != NULL ? tmp->data : NULL); } -void *list_get_max(const list_t *restrict l) { - return list_get_minmax(l, +1); +void *list_get_max(const list_t *restrict l) +{ + return list_get_minmax(l, +1); } -void *list_get_min(const list_t *restrict l) { - return list_get_minmax(l, -1); +void *list_get_min(const list_t *restrict l) +{ + return list_get_minmax(l, -1); } /* REQUIRES {list->numels >= 1} * return the min (versus < 0) or max value (v > 0) in l */ -static void *list_get_minmax(const list_t *restrict l, int versus) { - void *curminmax; - struct list_entry_s *s; +static void *list_get_minmax(const list_t *restrict l, int versus) +{ + void *curminmax; + struct list_entry_s *s; - if (l->attrs.comparator == NULL || l->numels == 0) - return NULL; + if (l->attrs.comparator == NULL || l->numels == 0) + return NULL; - curminmax = l->head_sentinel->next->data; - for (s = l->head_sentinel->next->next; s != l->tail_sentinel; s = s->next) { - if (l->attrs.comparator(curminmax, s->data) * versus > 0) - curminmax = s->data; - } + curminmax = l->head_sentinel->next->data; + for (s = l->head_sentinel->next->next; s != l->tail_sentinel; s = s->next) { + if (l->attrs.comparator(curminmax, s->data) * versus > 0) + curminmax = s->data; + } - return curminmax; + return curminmax; } /* set tmp to point to element at index posstart in l */ -static inline struct list_entry_s *list_findpos(const list_t *restrict l, int posstart) { - struct list_entry_s *ptr; - float x; - int i; +static inline struct list_entry_s *list_findpos(const list_t *restrict l, int posstart) +{ + struct list_entry_s *ptr; + float x; + int i; - /* accept 1 slot overflow for fetching head and tail sentinels */ - if (posstart < -1 || posstart > (int)l->numels) return NULL; + /* accept 1 slot overflow for fetching head and tail sentinels */ + if (posstart < -1 || posstart > (int) l->numels) + return NULL; - x = (float)(posstart+1) / l->numels; - if (x <= 0.25) { - /* first quarter: get to posstart from head */ - for (i = -1, ptr = l->head_sentinel; i < posstart; ptr = ptr->next, i++); - } else if (x < 0.5) { - /* second quarter: get to posstart from mid */ - for (i = (l->numels-1)/2, ptr = l->mid; i > posstart; ptr = ptr->prev, i--); - } else if (x <= 0.75) { - /* third quarter: get to posstart from mid */ - for (i = (l->numels-1)/2, ptr = l->mid; i < posstart; ptr = ptr->next, i++); - } else { - /* fourth quarter: get to posstart from tail */ - for (i = l->numels, ptr = 
l->tail_sentinel; i > posstart; ptr = ptr->prev, i--); - } + x = (float) (posstart + 1) / l->numels; + if (x <= 0.25) { + /* first quarter: get to posstart from head */ + for (i = -1, ptr = l->head_sentinel; i < posstart; ptr = ptr->next, i++); + } else if (x < 0.5) { + /* second quarter: get to posstart from mid */ + for (i = (l->numels - 1) / 2, ptr = l->mid; i > posstart; ptr = ptr->prev, i--); + } else if (x <= 0.75) { + /* third quarter: get to posstart from mid */ + for (i = (l->numels - 1) / 2, ptr = l->mid; i < posstart; ptr = ptr->next, i++); + } else { + /* fourth quarter: get to posstart from tail */ + for (i = l->numels, ptr = l->tail_sentinel; i > posstart; ptr = ptr->prev, i--); + } - return ptr; + return ptr; } -void *list_extract_at(list_t *restrict l, unsigned int pos) { - struct list_entry_s *tmp; - void *data; +void *list_extract_at(list_t *restrict l, unsigned int pos) +{ + struct list_entry_s *tmp; + void *data; - if (l->iter_active || pos >= l->numels) return NULL; + if (l->iter_active || pos >= l->numels) + return NULL; - tmp = list_findpos(l, pos); - data = tmp->data; + tmp = list_findpos(l, pos); + data = tmp->data; - tmp->data = NULL; /* save data from list_drop_elem() free() */ - list_drop_elem(l, tmp, pos); - l->numels--; + tmp->data = NULL; /* save data from list_drop_elem() free() */ + list_drop_elem(l, tmp, pos); + l->numels--; - assert(list_repOk(l)); + assert(list_repOk(l)); - return data; + return data; } -int list_insert_at(list_t *restrict l, const void *data, unsigned int pos) { - struct list_entry_s *lent, *succ, *prec; +int list_insert_at(list_t *restrict l, const void *data, unsigned int pos) +{ + struct list_entry_s *lent, *succ, *prec; - if (l->iter_active || pos > l->numels) return -1; + if (l->iter_active || pos > l->numels) + return -1; - /* this code optimizes malloc() with a free-list */ - if (l->spareelsnum > 0) { - lent = l->spareels[l->spareelsnum-1]; - l->spareelsnum--; - } else { - lent = (struct list_entry_s *)malloc(sizeof(struct list_entry_s)); - if (lent == NULL) - return -1; - } + /* this code optimizes malloc() with a free-list */ + if (l->spareelsnum > 0) { + lent = l->spareels[l->spareelsnum - 1]; + l->spareelsnum--; + } else { + lent = (struct list_entry_s *) malloc(sizeof(struct list_entry_s)); + if (lent == NULL) + return -1; + } - if (l->attrs.copy_data) { - /* make room for user' data (has to be copied) */ - size_t datalen = l->attrs.meter(data); - lent->data = (struct list_entry_s *)malloc(datalen); - memcpy(lent->data, data, datalen); - } else { - lent->data = (void*)data; - } + if (l->attrs.copy_data) { + /* make room for user' data (has to be copied) */ + size_t datalen = l->attrs.meter(data); + lent->data = (struct list_entry_s *) malloc(datalen); + memcpy(lent->data, data, datalen); + } else { + lent->data = (void *) data; + } - /* actually append element */ - prec = list_findpos(l, pos-1); - succ = prec->next; + /* actually append element */ + prec = list_findpos(l, pos - 1); + succ = prec->next; - prec->next = lent; - lent->prev = prec; - lent->next = succ; - succ->prev = lent; + prec->next = lent; + lent->prev = prec; + lent->next = succ; + succ->prev = lent; - l->numels++; + l->numels++; - /* fix mid pointer */ - if (l->numels == 1) { /* first element, set pointer */ - l->mid = lent; - } else if (l->numels % 2) { /* now odd */ - if (pos >= (l->numels-1)/2) l->mid = l->mid->next; - } else { /* now even */ - if (pos <= (l->numels-1)/2) l->mid = l->mid->prev; - } + /* fix mid pointer */ + if (l->numels == 1) { /* first 
element, set pointer */ + l->mid = lent; + } else if (l->numels % 2) { /* now odd */ + if (pos >= (l->numels - 1) / 2) + l->mid = l->mid->next; + } else { /* now even */ + if (pos <= (l->numels - 1) / 2) + l->mid = l->mid->prev; + } - assert(list_repOk(l)); + assert(list_repOk(l)); - return 1; + return 1; } -int list_delete(list_t *restrict l, const void *data) { +int list_delete(list_t *restrict l, const void *data) +{ int pos, r; pos = list_locate(l, data); @@ -539,565 +573,609 @@ int list_delete(list_t *restrict l, const void *data) { if (r < 0) return -1; - assert(list_repOk(l)); + assert(list_repOk(l)); return 0; } -int list_delete_at(list_t *restrict l, unsigned int pos) { - struct list_entry_s *delendo; +int list_delete_at(list_t *restrict l, unsigned int pos) +{ + struct list_entry_s *delendo; - if (l->iter_active || pos >= l->numels) return -1; + if (l->iter_active || pos >= l->numels) + return -1; - delendo = list_findpos(l, pos); + delendo = list_findpos(l, pos); - list_drop_elem(l, delendo, pos); + list_drop_elem(l, delendo, pos); - l->numels--; + l->numels--; - assert(list_repOk(l)); + assert(list_repOk(l)); - return 0; + return 0; } -int list_delete_range(list_t *restrict l, unsigned int posstart, unsigned int posend) { - struct list_entry_s *lastvalid, *tmp, *tmp2; - unsigned int numdel, midposafter, i; - int movedx; +int list_delete_range(list_t *restrict l, unsigned int posstart, unsigned int posend) +{ + struct list_entry_s *lastvalid, *tmp, *tmp2; + unsigned int numdel, midposafter, i; + int movedx; - if (l->iter_active || posend < posstart || posend >= l->numels) return -1; + if (l->iter_active || posend < posstart || posend >= l->numels) + return -1; - numdel = posend - posstart + 1; - if (numdel == l->numels) return list_clear(l); + numdel = posend - posstart + 1; + if (numdel == l->numels) + return list_clear(l); - tmp = list_findpos(l, posstart); /* first el to be deleted */ - lastvalid = tmp->prev; /* last valid element */ + tmp = list_findpos(l, posstart); /* first el to be deleted */ + lastvalid = tmp->prev; /* last valid element */ - midposafter = (l->numels-1-numdel)/2; + midposafter = (l->numels - 1 - numdel) / 2; - midposafter = midposafter < posstart ? midposafter : midposafter+numdel; - movedx = midposafter - (l->numels-1)/2; + midposafter = midposafter < posstart ? 
midposafter : midposafter + numdel; + movedx = midposafter - (l->numels - 1) / 2; - if (movedx > 0) { /* move right */ - for (i = 0; i < (unsigned int)movedx; l->mid = l->mid->next, i++); - } else { /* move left */ - movedx = -movedx; - for (i = 0; i < (unsigned int)movedx; l->mid = l->mid->prev, i++); - } + if (movedx > 0) { /* move right */ + for (i = 0; i < (unsigned int) movedx; l->mid = l->mid->next, i++); + } else { /* move left */ + movedx = -movedx; + for (i = 0; i < (unsigned int) movedx; l->mid = l->mid->prev, i++); + } - assert(posstart == 0 || lastvalid != l->head_sentinel); - i = posstart; - if (l->attrs.copy_data) { - /* also free element data */ - for (; i <= posend; i++) { - tmp2 = tmp; - tmp = tmp->next; - if (tmp2->data != NULL) free(tmp2->data); - if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) { - l->spareels[l->spareelsnum++] = tmp2; - } else { - free(tmp2); - } - } - } else { - /* only free containers */ - for (; i <= posend; i++) { - tmp2 = tmp; - tmp = tmp->next; - if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) { - l->spareels[l->spareelsnum++] = tmp2; - } else { - free(tmp2); - } - } - } - assert(i == posend+1 && (posend != l->numels || tmp == l->tail_sentinel)); + assert(posstart == 0 || lastvalid != l->head_sentinel); + i = posstart; + if (l->attrs.copy_data) { + /* also free element data */ + for (; i <= posend; i++) { + tmp2 = tmp; + tmp = tmp->next; + if (tmp2->data != NULL) + free(tmp2->data); + if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) { + l->spareels[l->spareelsnum++] = tmp2; + } else { + free(tmp2); + } + } + } else { + /* only free containers */ + for (; i <= posend; i++) { + tmp2 = tmp; + tmp = tmp->next; + if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) { + l->spareels[l->spareelsnum++] = tmp2; + } else { + free(tmp2); + } + } + } + assert(i == posend + 1 && (posend != l->numels || tmp == l->tail_sentinel)); - lastvalid->next = tmp; - tmp->prev = lastvalid; + lastvalid->next = tmp; + tmp->prev = lastvalid; - l->numels -= posend - posstart + 1; + l->numels -= posend - posstart + 1; - assert(list_repOk(l)); + assert(list_repOk(l)); - return numdel; + return numdel; } -int list_clear(list_t *restrict l) { - struct list_entry_s *s; - unsigned int numels; +int list_clear(list_t *restrict l) +{ + struct list_entry_s *s; + unsigned int numels; - /* will be returned */ - numels = l->numels; + /* will be returned */ + numels = l->numels; - if (l->iter_active) return -1; + if (l->iter_active) + return -1; - if (l->attrs.copy_data) { /* also free user data */ - /* spare a loop conditional with two loops: spareing elems and freeing elems */ - for (s = l->head_sentinel->next; l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS && s != l->tail_sentinel; s = s->next) { - /* move elements as spares as long as there is room */ - if (s->data != NULL) free(s->data); - l->spareels[l->spareelsnum++] = s; - } - while (s != l->tail_sentinel) { - /* free the remaining elems */ - if (s->data != NULL) free(s->data); - s = s->next; - free(s->prev); - } - l->head_sentinel->next = l->tail_sentinel; - l->tail_sentinel->prev = l->head_sentinel; - } else { /* only free element containers */ - /* spare a loop conditional with two loops: spareing elems and freeing elems */ - for (s = l->head_sentinel->next; l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS && s != l->tail_sentinel; s = s->next) { - /* move elements as spares as long as there is room */ - l->spareels[l->spareelsnum++] = s; - } - while (s != l->tail_sentinel) { - /* free the remaining elems */ - s = s->next; - free(s->prev); - } - 
l->head_sentinel->next = l->tail_sentinel; - l->tail_sentinel->prev = l->head_sentinel; - } - l->numels = 0; - l->mid = NULL; + if (l->attrs.copy_data) { /* also free user data */ + /* spare a loop conditional with two loops: spareing elems and freeing elems */ + for (s = l->head_sentinel->next; l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS && s != l->tail_sentinel; s = s->next) { + /* move elements as spares as long as there is room */ + if (s->data != NULL) + free(s->data); + l->spareels[l->spareelsnum++] = s; + } + while (s != l->tail_sentinel) { + /* free the remaining elems */ + if (s->data != NULL) + free(s->data); + s = s->next; + free(s->prev); + } + l->head_sentinel->next = l->tail_sentinel; + l->tail_sentinel->prev = l->head_sentinel; + } else { /* only free element containers */ + /* spare a loop conditional with two loops: spareing elems and freeing elems */ + for (s = l->head_sentinel->next; l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS && s != l->tail_sentinel; s = s->next) { + /* move elements as spares as long as there is room */ + l->spareels[l->spareelsnum++] = s; + } + while (s != l->tail_sentinel) { + /* free the remaining elems */ + s = s->next; + free(s->prev); + } + l->head_sentinel->next = l->tail_sentinel; + l->tail_sentinel->prev = l->head_sentinel; + } + l->numels = 0; + l->mid = NULL; - assert(list_repOk(l)); + assert(list_repOk(l)); - return numels; + return numels; } -unsigned int list_size(const list_t *restrict l) { - return l->numels; +unsigned int list_size(const list_t *restrict l) +{ + return l->numels; } -int list_empty(const list_t *restrict l) { - return (l->numels == 0); +int list_empty(const list_t *restrict l) +{ + return (l->numels == 0); } -int list_locate(const list_t *restrict l, const void *data) { - struct list_entry_s *el; - int pos = 0; +int list_locate(const list_t *restrict l, const void *data) +{ + struct list_entry_s *el; + int pos = 0; - if (l->attrs.comparator != NULL) { - /* use comparator */ - for (el = l->head_sentinel->next; el != l->tail_sentinel; el = el->next, pos++) { - if (l->attrs.comparator(data, el->data) == 0) break; - } - } else { - /* compare references */ - for (el = l->head_sentinel->next; el != l->tail_sentinel; el = el->next, pos++) { - if (el->data == data) break; - } - } - if (el == l->tail_sentinel) return -1; + if (l->attrs.comparator != NULL) { + /* use comparator */ + for (el = l->head_sentinel->next; el != l->tail_sentinel; el = el->next, pos++) { + if (l->attrs.comparator(data, el->data) == 0) + break; + } + } else { + /* compare references */ + for (el = l->head_sentinel->next; el != l->tail_sentinel; el = el->next, pos++) { + if (el->data == data) + break; + } + } + if (el == l->tail_sentinel) + return -1; - return pos; + return pos; } -void *list_seek(list_t *restrict l, const void *indicator) { - const struct list_entry_s *iter; +void *list_seek(list_t *restrict l, const void *indicator) +{ + const struct list_entry_s *iter; - if (l->attrs.seeker == NULL) return NULL; + if (l->attrs.seeker == NULL) + return NULL; - for (iter = l->head_sentinel->next; iter != l->tail_sentinel; iter = iter->next) { - if (l->attrs.seeker(iter->data, indicator) != 0) return iter->data; - } + for (iter = l->head_sentinel->next; iter != l->tail_sentinel; iter = iter->next) { + if (l->attrs.seeker(iter->data, indicator) != 0) + return iter->data; + } - return NULL; + return NULL; } -int list_contains(const list_t *restrict l, const void *data) { - return (list_locate(l, data) >= 0); +int list_contains(const list_t *restrict l, const 
void *data) +{ + return (list_locate(l, data) >= 0); } -int list_concat(const list_t *l1, const list_t *l2, list_t *restrict dest) { - struct list_entry_s *el, *srcel; - unsigned int cnt; - int err; +int list_concat(const list_t *l1, const list_t *l2, list_t *restrict dest) +{ + struct list_entry_s *el, *srcel; + unsigned int cnt; + int err; - if (l1 == NULL || l2 == NULL || dest == NULL || l1 == dest || l2 == dest) - return -1; + if (l1 == NULL || l2 == NULL || dest == NULL || l1 == dest || l2 == dest) + return -1; - list_init(dest); + list_init(dest); - dest->numels = l1->numels + l2->numels; - if (dest->numels == 0) - return 0; + dest->numels = l1->numels + l2->numels; + if (dest->numels == 0) + return 0; - /* copy list1 */ - srcel = l1->head_sentinel->next; - el = dest->head_sentinel; - while (srcel != l1->tail_sentinel) { - el->next = (struct list_entry_s *)malloc(sizeof(struct list_entry_s)); - el->next->prev = el; - el = el->next; - el->data = srcel->data; - srcel = srcel->next; - } - dest->mid = el; /* approximate position (adjust later) */ - /* copy list 2 */ - srcel = l2->head_sentinel->next; - while (srcel != l2->tail_sentinel) { - el->next = (struct list_entry_s *)malloc(sizeof(struct list_entry_s)); - el->next->prev = el; - el = el->next; - el->data = srcel->data; - srcel = srcel->next; - } - el->next = dest->tail_sentinel; - dest->tail_sentinel->prev = el; + /* copy list1 */ + srcel = l1->head_sentinel->next; + el = dest->head_sentinel; + while (srcel != l1->tail_sentinel) { + el->next = (struct list_entry_s *) malloc(sizeof(struct list_entry_s)); + el->next->prev = el; + el = el->next; + el->data = srcel->data; + srcel = srcel->next; + } + dest->mid = el; /* approximate position (adjust later) */ + /* copy list 2 */ + srcel = l2->head_sentinel->next; + while (srcel != l2->tail_sentinel) { + el->next = (struct list_entry_s *) malloc(sizeof(struct list_entry_s)); + el->next->prev = el; + el = el->next; + el->data = srcel->data; + srcel = srcel->next; + } + el->next = dest->tail_sentinel; + dest->tail_sentinel->prev = el; - /* fix mid pointer */ - err = l2->numels - l1->numels; - if ((err+1)/2 > 0) { /* correct pos RIGHT (err-1)/2 moves */ - err = (err+1)/2; - for (cnt = 0; cnt < (unsigned int)err; cnt++) dest->mid = dest->mid->next; - } else if (err/2 < 0) { /* correct pos LEFT (err/2)-1 moves */ - err = -err/2; - for (cnt = 0; cnt < (unsigned int)err; cnt++) dest->mid = dest->mid->prev; - } + /* fix mid pointer */ + err = l2->numels - l1->numels; + if ((err + 1) / 2 > 0) { /* correct pos RIGHT (err-1)/2 moves */ + err = (err + 1) / 2; + for (cnt = 0; cnt < (unsigned int) err; cnt++) + dest->mid = dest->mid->next; + } else if (err / 2 < 0) { /* correct pos LEFT (err/2)-1 moves */ + err = -err / 2; + for (cnt = 0; cnt < (unsigned int) err; cnt++) + dest->mid = dest->mid->prev; + } - assert(!(list_repOk(l1) && list_repOk(l2)) || list_repOk(dest)); + assert(!(list_repOk(l1) && list_repOk(l2)) || list_repOk(dest)); - return 0; + return 0; } -int list_sort(list_t *restrict l, int versus) { - if (l->iter_active || l->attrs.comparator == NULL) /* cannot modify list in the middle of an iteration */ - return -1; +int list_sort(list_t *restrict l, int versus) +{ + if (l->iter_active || l->attrs.comparator == NULL) /* cannot modify list in the middle of an iteration */ + return -1; - if (l->numels <= 1) - return 0; - list_sort_quicksort(l, versus, 0, l->head_sentinel->next, l->numels-1, l->tail_sentinel->prev); - assert(list_repOk(l)); - return 0; + if (l->numels <= 1) + return 0; + 
list_sort_quicksort(l, versus, 0, l->head_sentinel->next, l->numels - 1, l->tail_sentinel->prev); + assert(list_repOk(l)); + return 0; } #ifdef SIMCLIST_WITH_THREADS struct list_sort_wrappedparams { - list_t *restrict l; - int versus; - unsigned int first, last; - struct list_entry_s *fel, *lel; + list_t *restrict l; + int versus; + unsigned int first, last; + struct list_entry_s *fel, *lel; }; -static void *list_sort_quicksort_threadwrapper(void *wrapped_params) { - struct list_sort_wrappedparams *wp = (struct list_sort_wrappedparams *)wrapped_params; - list_sort_quicksort(wp->l, wp->versus, wp->first, wp->fel, wp->last, wp->lel); - free(wp); - pthread_exit(NULL); - return NULL; +static void *list_sort_quicksort_threadwrapper(void *wrapped_params) +{ + struct list_sort_wrappedparams *wp = (struct list_sort_wrappedparams *) wrapped_params; + list_sort_quicksort(wp->l, wp->versus, wp->first, wp->fel, wp->last, wp->lel); + free(wp); + pthread_exit(NULL); + return NULL; } #endif static inline void list_sort_selectionsort(list_t *restrict l, int versus, - unsigned int first, struct list_entry_s *fel, - unsigned int last, struct list_entry_s *lel) { - struct list_entry_s *cursor, *toswap, *firstunsorted; - void *tmpdata; + unsigned int first, struct list_entry_s *fel, unsigned int last, struct list_entry_s *lel) +{ + struct list_entry_s *cursor, *toswap, *firstunsorted; + void *tmpdata; - if (last <= first) /* <= 1-element lists are always sorted */ - return; + if (last <= first) /* <= 1-element lists are always sorted */ + return; - for (firstunsorted = fel; firstunsorted != lel; firstunsorted = firstunsorted->next) { - /* find min or max in the remainder of the list */ - for (toswap = firstunsorted, cursor = firstunsorted->next; cursor != lel->next; cursor = cursor->next) - if (l->attrs.comparator(toswap->data, cursor->data) * -versus > 0) toswap = cursor; - if (toswap != firstunsorted) { /* swap firstunsorted with toswap */ - tmpdata = firstunsorted->data; - firstunsorted->data = toswap->data; - toswap->data = tmpdata; - } - } + for (firstunsorted = fel; firstunsorted != lel; firstunsorted = firstunsorted->next) { + /* find min or max in the remainder of the list */ + for (toswap = firstunsorted, cursor = firstunsorted->next; cursor != lel->next; cursor = cursor->next) + if (l->attrs.comparator(toswap->data, cursor->data) * -versus > 0) + toswap = cursor; + if (toswap != firstunsorted) { /* swap firstunsorted with toswap */ + tmpdata = firstunsorted->data; + firstunsorted->data = toswap->data; + toswap->data = tmpdata; + } + } } -static void list_sort_quicksort(list_t *restrict l, int versus, - unsigned int first, struct list_entry_s *fel, - unsigned int last, struct list_entry_s *lel) { - unsigned int pivotid; - unsigned int i; - register struct list_entry_s *pivot; - struct list_entry_s *left, *right; - void *tmpdata; +static void list_sort_quicksort(list_t *restrict l, int versus, unsigned int first, struct list_entry_s *fel, unsigned int last, struct list_entry_s *lel) +{ + unsigned int pivotid; + unsigned int i; + register struct list_entry_s *pivot; + struct list_entry_s *left, *right; + void *tmpdata; #ifdef SIMCLIST_WITH_THREADS - pthread_t tid; - int traised; + pthread_t tid; + int traised; #endif - if (last <= first) /* <= 1-element lists are always sorted */ - return; + if (last <= first) /* <= 1-element lists are always sorted */ + return; - if (last - first+1 <= SIMCLIST_MINQUICKSORTELS) { - list_sort_selectionsort(l, versus, first, fel, last, lel); - return; - } + if (last - 
first + 1 <= SIMCLIST_MINQUICKSORTELS) { + list_sort_selectionsort(l, versus, first, fel, last, lel); + return; + } - /* base of iteration: one element list */ - if (! (last > first)) return; + /* base of iteration: one element list */ + if (!(last > first)) + return; - pivotid = (get_random() % (last - first + 1)); - /* pivotid = (last - first + 1) / 2; */ + pivotid = (get_random() % (last - first + 1)); + /* pivotid = (last - first + 1) / 2; */ - /* find pivot */ - if (pivotid < (last - first + 1)/2) { - for (i = 0, pivot = fel; i < pivotid; pivot = pivot->next, i++); - } else { - for (i = last - first, pivot = lel; i > pivotid; pivot = pivot->prev, i--); - } + /* find pivot */ + if (pivotid < (last - first + 1) / 2) { + for (i = 0, pivot = fel; i < pivotid; pivot = pivot->next, i++); + } else { + for (i = last - first, pivot = lel; i > pivotid; pivot = pivot->prev, i--); + } - /* smaller PIVOT bigger */ - left = fel; - right = lel; - /* iterate --- left ---> PIV <--- right --- */ - while (left != pivot && right != pivot) { - for (; left != pivot && (l->attrs.comparator(left->data, pivot->data) * -versus <= 0); left = left->next); - /* left points to a smaller element, or to pivot */ - for (; right != pivot && (l->attrs.comparator(right->data, pivot->data) * -versus >= 0); right = right->prev); - /* right points to a bigger element, or to pivot */ - if (left != pivot && right != pivot) { - /* swap, then move iterators */ - tmpdata = left->data; - left->data = right->data; - right->data = tmpdata; + /* smaller PIVOT bigger */ + left = fel; + right = lel; + /* iterate --- left ---> PIV <--- right --- */ + while (left != pivot && right != pivot) { + for (; left != pivot && (l->attrs.comparator(left->data, pivot->data) * -versus <= 0); left = left->next); + /* left points to a smaller element, or to pivot */ + for (; right != pivot && (l->attrs.comparator(right->data, pivot->data) * -versus >= 0); right = right->prev); + /* right points to a bigger element, or to pivot */ + if (left != pivot && right != pivot) { + /* swap, then move iterators */ + tmpdata = left->data; + left->data = right->data; + right->data = tmpdata; - left = left->next; - right = right->prev; - } - } + left = left->next; + right = right->prev; + } + } - /* now either left points to pivot (end run), or right */ - if (right == pivot) { /* left part longer */ - while (left != pivot) { - if (l->attrs.comparator(left->data, pivot->data) * -versus > 0) { - tmpdata = left->data; - left->data = pivot->prev->data; - pivot->prev->data = pivot->data; - pivot->data = tmpdata; - pivot = pivot->prev; - pivotid--; - if (pivot == left) break; - } else { - left = left->next; - } - } - } else { /* right part longer */ - while (right != pivot) { - if (l->attrs.comparator(right->data, pivot->data) * -versus < 0) { - /* move current right before pivot */ - tmpdata = right->data; - right->data = pivot->next->data; - pivot->next->data = pivot->data; - pivot->data = tmpdata; - pivot = pivot->next; - pivotid++; - if (pivot == right) break; - } else { - right = right->prev; - } - } - } + /* now either left points to pivot (end run), or right */ + if (right == pivot) { /* left part longer */ + while (left != pivot) { + if (l->attrs.comparator(left->data, pivot->data) * -versus > 0) { + tmpdata = left->data; + left->data = pivot->prev->data; + pivot->prev->data = pivot->data; + pivot->data = tmpdata; + pivot = pivot->prev; + pivotid--; + if (pivot == left) + break; + } else { + left = left->next; + } + } + } else { /* right part longer */ + while 
(right != pivot) { + if (l->attrs.comparator(right->data, pivot->data) * -versus < 0) { + /* move current right before pivot */ + tmpdata = right->data; + right->data = pivot->next->data; + pivot->next->data = pivot->data; + pivot->data = tmpdata; + pivot = pivot->next; + pivotid++; + if (pivot == right) + break; + } else { + right = right->prev; + } + } + } - /* sort sublists A and B : |---A---| pivot |---B---| */ + /* sort sublists A and B : |---A---| pivot |---B---| */ #ifdef SIMCLIST_WITH_THREADS - traised = 0; - if (pivotid > 0) { - /* prepare wrapped args, then start thread */ - if (l->threadcount < SIMCLIST_MAXTHREADS-1) { - struct list_sort_wrappedparams *wp = (struct list_sort_wrappedparams *)malloc(sizeof(struct list_sort_wrappedparams)); - l->threadcount++; - traised = 1; - wp->l = l; - wp->versus = versus; - wp->first = first; - wp->fel = fel; - wp->last = first+pivotid-1; - wp->lel = pivot->prev; - if (pthread_create(&tid, NULL, list_sort_quicksort_threadwrapper, wp) != 0) { - free(wp); - traised = 0; - list_sort_quicksort(l, versus, first, fel, first+pivotid-1, pivot->prev); - } - } else { - list_sort_quicksort(l, versus, first, fel, first+pivotid-1, pivot->prev); - } - } - if (first + pivotid < last) list_sort_quicksort(l, versus, first+pivotid+1, pivot->next, last, lel); - if (traised) { - pthread_join(tid, (void **)NULL); - l->threadcount--; - } + traised = 0; + if (pivotid > 0) { + /* prepare wrapped args, then start thread */ + if (l->threadcount < SIMCLIST_MAXTHREADS - 1) { + struct list_sort_wrappedparams *wp = (struct list_sort_wrappedparams *) malloc(sizeof(struct list_sort_wrappedparams)); + l->threadcount++; + traised = 1; + wp->l = l; + wp->versus = versus; + wp->first = first; + wp->fel = fel; + wp->last = first + pivotid - 1; + wp->lel = pivot->prev; + if (pthread_create(&tid, NULL, list_sort_quicksort_threadwrapper, wp) != 0) { + free(wp); + traised = 0; + list_sort_quicksort(l, versus, first, fel, first + pivotid - 1, pivot->prev); + } + } else { + list_sort_quicksort(l, versus, first, fel, first + pivotid - 1, pivot->prev); + } + } + if (first + pivotid < last) + list_sort_quicksort(l, versus, first + pivotid + 1, pivot->next, last, lel); + if (traised) { + pthread_join(tid, (void **) NULL); + l->threadcount--; + } #else - if (pivotid > 0) list_sort_quicksort(l, versus, first, fel, first+pivotid-1, pivot->prev); - if (first + pivotid < last) list_sort_quicksort(l, versus, first+pivotid+1, pivot->next, last, lel); + if (pivotid > 0) + list_sort_quicksort(l, versus, first, fel, first + pivotid - 1, pivot->prev); + if (first + pivotid < last) + list_sort_quicksort(l, versus, first + pivotid + 1, pivot->next, last, lel); #endif } -int list_iterator_start(list_t *restrict l) { - if (l->iter_active) return 0; - l->iter_pos = 0; - l->iter_active = 1; - l->iter_curentry = l->head_sentinel->next; - return 1; +int list_iterator_start(list_t *restrict l) +{ + if (l->iter_active) + return 0; + l->iter_pos = 0; + l->iter_active = 1; + l->iter_curentry = l->head_sentinel->next; + return 1; } -void *list_iterator_next(list_t *restrict l) { - void *toret; +void *list_iterator_next(list_t *restrict l) +{ + void *toret; - if (! l->iter_active) return NULL; + if (!l->iter_active) + return NULL; - toret = l->iter_curentry->data; - l->iter_curentry = l->iter_curentry->next; - l->iter_pos++; + toret = l->iter_curentry->data; + l->iter_curentry = l->iter_curentry->next; + l->iter_pos++; - return toret; + return toret; } -int list_iterator_hasnext(const list_t *restrict l) { - if (! 
l->iter_active) return 0; - return (l->iter_pos < l->numels); +int list_iterator_hasnext(const list_t *restrict l) +{ + if (!l->iter_active) + return 0; + return (l->iter_pos < l->numels); } -int list_iterator_stop(list_t *restrict l) { - if (! l->iter_active) return 0; - l->iter_pos = 0; - l->iter_active = 0; - return 1; +int list_iterator_stop(list_t *restrict l) +{ + if (!l->iter_active) + return 0; + l->iter_pos = 0; + l->iter_active = 0; + return 1; } -int list_hash(const list_t *restrict l, list_hash_t *restrict hash) { - struct list_entry_s *x; - list_hash_t tmphash; +int list_hash(const list_t *restrict l, list_hash_t *restrict hash) +{ + struct list_entry_s *x; + list_hash_t tmphash; - assert(hash != NULL); + assert(hash != NULL); - tmphash = l->numels * 2 + 100; - if (l->attrs.hasher == NULL) { + tmphash = l->numels * 2 + 100; + if (l->attrs.hasher == NULL) { #ifdef SIMCLIST_ALLOW_LOCATIONBASED_HASHES - /* ENABLE WITH CARE !! */ + /* ENABLE WITH CARE !! */ #warning "Memlocation-based hash is consistent only for testing modification in the same program run." - int i; + int i; - /* only use element references */ - for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { - for (i = 0; i < sizeof(x->data); i++) { - tmphash += (tmphash ^ (uintptr_t)x->data); - } - tmphash += tmphash % l->numels; - } + /* only use element references */ + for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { + for (i = 0; i < sizeof(x->data); i++) { + tmphash += (tmphash ^ (uintptr_t) x->data); + } + tmphash += tmphash % l->numels; + } #else - return -1; + return -1; #endif - } else { - /* hash each element with the user-given function */ - for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { - tmphash += tmphash ^ l->attrs.hasher(x->data); - tmphash += tmphash % l->numels; - } - } + } else { + /* hash each element with the user-given function */ + for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { + tmphash += tmphash ^ l->attrs.hasher(x->data); + tmphash += tmphash % l->numels; + } + } - *hash = tmphash; + *hash = tmphash; - return 0; + return 0; } #ifndef SIMCLIST_NO_DUMPRESTORE -int list_dump_getinfo_filedescriptor(int fd, list_dump_info_t *restrict info) { - int32_t terminator_head, terminator_tail; - uint32_t elemlen; - off_t hop; +int list_dump_getinfo_filedescriptor(int fd, list_dump_info_t *restrict info) +{ + int32_t terminator_head, terminator_tail; + uint32_t elemlen; + off_t hop; - /* version */ - READ_ERRCHECK(fd, & info->version, sizeof(info->version)); - info->version = ntohs(info->version); - if (info->version > SIMCLIST_DUMPFORMAT_VERSION) { - errno = EILSEQ; - return -1; - } + /* version */ + READ_ERRCHECK(fd, &info->version, sizeof(info->version)); + info->version = ntohs(info->version); + if (info->version > SIMCLIST_DUMPFORMAT_VERSION) { + errno = EILSEQ; + return -1; + } - /* timestamp.tv_sec and timestamp.tv_usec */ - READ_ERRCHECK(fd, & info->timestamp.tv_sec, sizeof(info->timestamp.tv_sec)); - info->timestamp.tv_sec = ntohl(info->timestamp.tv_sec); - READ_ERRCHECK(fd, & info->timestamp.tv_usec, sizeof(info->timestamp.tv_usec)); - info->timestamp.tv_usec = ntohl(info->timestamp.tv_usec); + /* timestamp.tv_sec and timestamp.tv_usec */ + READ_ERRCHECK(fd, &info->timestamp.tv_sec, sizeof(info->timestamp.tv_sec)); + info->timestamp.tv_sec = ntohl(info->timestamp.tv_sec); + READ_ERRCHECK(fd, &info->timestamp.tv_usec, sizeof(info->timestamp.tv_usec)); + info->timestamp.tv_usec = ntohl(info->timestamp.tv_usec); - /* 
list terminator (to check thereafter) */ - READ_ERRCHECK(fd, & terminator_head, sizeof(terminator_head)); - terminator_head = ntohl(terminator_head); + /* list terminator (to check thereafter) */ + READ_ERRCHECK(fd, &terminator_head, sizeof(terminator_head)); + terminator_head = ntohl(terminator_head); - /* list size */ - READ_ERRCHECK(fd, & info->list_size, sizeof(info->list_size)); - info->list_size = ntohl(info->list_size); + /* list size */ + READ_ERRCHECK(fd, &info->list_size, sizeof(info->list_size)); + info->list_size = ntohl(info->list_size); - /* number of elements */ - READ_ERRCHECK(fd, & info->list_numels, sizeof(info->list_numels)); - info->list_numels = ntohl(info->list_numels); + /* number of elements */ + READ_ERRCHECK(fd, &info->list_numels, sizeof(info->list_numels)); + info->list_numels = ntohl(info->list_numels); - /* length of each element (for checking for consistency) */ - READ_ERRCHECK(fd, & elemlen, sizeof(elemlen)); - elemlen = ntohl(elemlen); + /* length of each element (for checking for consistency) */ + READ_ERRCHECK(fd, &elemlen, sizeof(elemlen)); + elemlen = ntohl(elemlen); - /* list hash */ - READ_ERRCHECK(fd, & info->list_hash, sizeof(info->list_hash)); - info->list_hash = ntohl(info->list_hash); + /* list hash */ + READ_ERRCHECK(fd, &info->list_hash, sizeof(info->list_hash)); + info->list_hash = ntohl(info->list_hash); - /* check consistency */ - if (elemlen > 0) { - /* constant length, hop by size only */ - hop = info->list_size; - } else { - /* non-constant length, hop by size + all element length blocks */ - hop = info->list_size + elemlen*info->list_numels; - } - if (lseek(fd, hop, SEEK_CUR) == -1) { - return -1; - } + /* check consistency */ + if (elemlen > 0) { + /* constant length, hop by size only */ + hop = info->list_size; + } else { + /* non-constant length, hop by size + all element length blocks */ + hop = info->list_size + elemlen * info->list_numels; + } + if (lseek(fd, hop, SEEK_CUR) == -1) { + return -1; + } - /* read the trailing value and compare with terminator_head */ - READ_ERRCHECK(fd, & terminator_tail, sizeof(terminator_tail)); - terminator_tail = ntohl(terminator_tail); + /* read the trailing value and compare with terminator_head */ + READ_ERRCHECK(fd, &terminator_tail, sizeof(terminator_tail)); + terminator_tail = ntohl(terminator_tail); - if (terminator_head == terminator_tail) - info->consistent = 1; - else - info->consistent = 0; + if (terminator_head == terminator_tail) + info->consistent = 1; + else + info->consistent = 0; - return 0; + return 0; } -int list_dump_getinfo_file(const char *restrict filename, list_dump_info_t *restrict info) { - int fd, ret; +int list_dump_getinfo_file(const char *restrict filename, list_dump_info_t *restrict info) +{ + int fd, ret; - fd = open(filename, O_RDONLY, 0); - if (fd < 0) return -1; + fd = open(filename, O_RDONLY, 0); + if (fd < 0) + return -1; - ret = list_dump_getinfo_filedescriptor(fd, info); - close(fd); + ret = list_dump_getinfo_filedescriptor(fd, info); + close(fd); - return ret; + return ret; } -int list_dump_filedescriptor(const list_t *restrict l, int fd, size_t *restrict len) { - struct list_entry_s *x; - void *ser_buf; - uint32_t bufsize; - struct timeval timeofday; - struct list_dump_header_s header; +int list_dump_filedescriptor(const list_t *restrict l, int fd, size_t *restrict len) +{ + struct list_entry_s *x; + void *ser_buf; + uint32_t bufsize; + struct timeval timeofday; + struct list_dump_header_s header; - if (l->attrs.meter == NULL && l->attrs.serializer == NULL) 
{ - errno = ENOTTY; - return -1; - } + if (l->attrs.meter == NULL && l->attrs.serializer == NULL) { + errno = ENOTTY; + return -1; + } - /**** DUMP FORMAT **** + /**** DUMP FORMAT **** [ ver timestamp | totlen numels elemlen hash | DATA ] @@ -1111,419 +1189,418 @@ int list_dump_filedescriptor(const list_t *restrict l, int fd, size_t *restrict *****/ - /* prepare HEADER */ - /* version */ - header.ver = htons( SIMCLIST_DUMPFORMAT_VERSION ); + /* prepare HEADER */ + /* version */ + header.ver = htons(SIMCLIST_DUMPFORMAT_VERSION); - /* timestamp */ - gettimeofday(&timeofday, NULL); - header.timestamp_sec = htonl(timeofday.tv_sec); - header.timestamp_usec = htonl(timeofday.tv_usec); + /* timestamp */ + gettimeofday(&timeofday, NULL); + header.timestamp_sec = htonl(timeofday.tv_sec); + header.timestamp_usec = htonl(timeofday.tv_usec); - header.rndterm = htonl((int32_t)get_random()); + header.rndterm = htonl((int32_t) get_random()); - /* total list size is postprocessed afterwards */ + /* total list size is postprocessed afterwards */ - /* number of elements */ - header.numels = htonl(l->numels); + /* number of elements */ + header.numels = htonl(l->numels); - /* include an hash, if possible */ - if (l->attrs.hasher != NULL) { - if (htonl(list_hash(l, & header.listhash)) != 0) { - /* could not compute list hash! */ - return -1; - } - } else { - header.listhash = htonl(0); - } + /* include an hash, if possible */ + if (l->attrs.hasher != NULL) { + if (htonl(list_hash(l, &header.listhash)) != 0) { + /* could not compute list hash! */ + return -1; + } + } else { + header.listhash = htonl(0); + } - header.totlistlen = header.elemlen = 0; + header.totlistlen = header.elemlen = 0; - /* leave room for the header at the beginning of the file */ - if (lseek(fd, SIMCLIST_DUMPFORMAT_HEADERLEN, SEEK_SET) < 0) { - /* errno set by lseek() */ - return -1; - } + /* leave room for the header at the beginning of the file */ + if (lseek(fd, SIMCLIST_DUMPFORMAT_HEADERLEN, SEEK_SET) < 0) { + /* errno set by lseek() */ + return -1; + } - /* write CONTENT */ - if (l->numels > 0) { - /* SPECULATE that the list has constant element size */ + /* write CONTENT */ + if (l->numels > 0) { + /* SPECULATE that the list has constant element size */ - if (l->attrs.serializer != NULL) { /* user user-specified serializer */ - /* get preliminary length of serialized element in header.elemlen */ - ser_buf = l->attrs.serializer(l->head_sentinel->next->data, & header.elemlen); - free(ser_buf); - /* request custom serialization of each element */ - for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { - ser_buf = l->attrs.serializer(x->data, &bufsize); - header.totlistlen += bufsize; - if (header.elemlen != 0) { /* continue on speculation */ - if (header.elemlen != bufsize) { - free(ser_buf); - /* constant element length speculation broken! 
*/ - header.elemlen = 0; - header.totlistlen = 0; - x = l->head_sentinel; - if (lseek(fd, SIMCLIST_DUMPFORMAT_HEADERLEN, SEEK_SET) < 0) { - /* errno set by lseek() */ - return -1; - } - /* restart from the beginning */ - continue; - } - /* speculation confirmed */ - WRITE_ERRCHECK(fd, ser_buf, bufsize); - } else { /* speculation found broken */ - WRITE_ERRCHECK(fd, & bufsize, sizeof(size_t)); - WRITE_ERRCHECK(fd, ser_buf, bufsize); - } - free(ser_buf); - } - } else if (l->attrs.meter != NULL) { - header.elemlen = (uint32_t)l->attrs.meter(l->head_sentinel->next->data); + if (l->attrs.serializer != NULL) { /* user user-specified serializer */ + /* get preliminary length of serialized element in header.elemlen */ + ser_buf = l->attrs.serializer(l->head_sentinel->next->data, &header.elemlen); + free(ser_buf); + /* request custom serialization of each element */ + for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { + ser_buf = l->attrs.serializer(x->data, &bufsize); + header.totlistlen += bufsize; + if (header.elemlen != 0) { /* continue on speculation */ + if (header.elemlen != bufsize) { + free(ser_buf); + /* constant element length speculation broken! */ + header.elemlen = 0; + header.totlistlen = 0; + x = l->head_sentinel; + if (lseek(fd, SIMCLIST_DUMPFORMAT_HEADERLEN, SEEK_SET) < 0) { + /* errno set by lseek() */ + return -1; + } + /* restart from the beginning */ + continue; + } + /* speculation confirmed */ + WRITE_ERRCHECK(fd, ser_buf, bufsize); + } else { /* speculation found broken */ + WRITE_ERRCHECK(fd, &bufsize, sizeof(size_t)); + WRITE_ERRCHECK(fd, ser_buf, bufsize); + } + free(ser_buf); + } + } else if (l->attrs.meter != NULL) { + header.elemlen = (uint32_t) l->attrs.meter(l->head_sentinel->next->data); - /* serialize the element straight from its data */ - for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { - bufsize = l->attrs.meter(x->data); - header.totlistlen += bufsize; - if (header.elemlen != 0) { - if (header.elemlen != bufsize) { - /* constant element length speculation broken! */ - header.elemlen = 0; - header.totlistlen = 0; - x = l->head_sentinel; - /* restart from the beginning */ - continue; - } - WRITE_ERRCHECK(fd, x->data, bufsize); - } else { - WRITE_ERRCHECK(fd, &bufsize, sizeof(size_t)); - WRITE_ERRCHECK(fd, x->data, bufsize); - } - } - } - /* adjust endianness */ - header.elemlen = htonl(header.elemlen); - header.totlistlen = htonl(header.totlistlen); - } + /* serialize the element straight from its data */ + for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) { + bufsize = l->attrs.meter(x->data); + header.totlistlen += bufsize; + if (header.elemlen != 0) { + if (header.elemlen != bufsize) { + /* constant element length speculation broken! 
*/ + header.elemlen = 0; + header.totlistlen = 0; + x = l->head_sentinel; + /* restart from the beginning */ + continue; + } + WRITE_ERRCHECK(fd, x->data, bufsize); + } else { + WRITE_ERRCHECK(fd, &bufsize, sizeof(size_t)); + WRITE_ERRCHECK(fd, x->data, bufsize); + } + } + } + /* adjust endianness */ + header.elemlen = htonl(header.elemlen); + header.totlistlen = htonl(header.totlistlen); + } - /* write random terminator */ - WRITE_ERRCHECK(fd, & header.rndterm, sizeof(header.rndterm)); /* list terminator */ + /* write random terminator */ + WRITE_ERRCHECK(fd, &header.rndterm, sizeof(header.rndterm)); /* list terminator */ - /* write header */ - lseek(fd, 0, SEEK_SET); + /* write header */ + lseek(fd, 0, SEEK_SET); - WRITE_ERRCHECK(fd, & header.ver, sizeof(header.ver)); /* version */ - WRITE_ERRCHECK(fd, & header.timestamp_sec, sizeof(header.timestamp_sec)); /* timestamp seconds */ - WRITE_ERRCHECK(fd, & header.timestamp_usec, sizeof(header.timestamp_usec)); /* timestamp microseconds */ - WRITE_ERRCHECK(fd, & header.rndterm, sizeof(header.rndterm)); /* random terminator */ + WRITE_ERRCHECK(fd, &header.ver, sizeof(header.ver)); /* version */ + WRITE_ERRCHECK(fd, &header.timestamp_sec, sizeof(header.timestamp_sec)); /* timestamp seconds */ + WRITE_ERRCHECK(fd, &header.timestamp_usec, sizeof(header.timestamp_usec)); /* timestamp microseconds */ + WRITE_ERRCHECK(fd, &header.rndterm, sizeof(header.rndterm)); /* random terminator */ - WRITE_ERRCHECK(fd, & header.totlistlen, sizeof(header.totlistlen)); /* total length of elements */ - WRITE_ERRCHECK(fd, & header.numels, sizeof(header.numels)); /* number of elements */ - WRITE_ERRCHECK(fd, & header.elemlen, sizeof(header.elemlen)); /* size of each element, or 0 for independent */ - WRITE_ERRCHECK(fd, & header.listhash, sizeof(header.listhash)); /* list hash, or 0 for "ignore" */ + WRITE_ERRCHECK(fd, &header.totlistlen, sizeof(header.totlistlen)); /* total length of elements */ + WRITE_ERRCHECK(fd, &header.numels, sizeof(header.numels)); /* number of elements */ + WRITE_ERRCHECK(fd, &header.elemlen, sizeof(header.elemlen)); /* size of each element, or 0 for independent */ + WRITE_ERRCHECK(fd, &header.listhash, sizeof(header.listhash)); /* list hash, or 0 for "ignore" */ - /* possibly store total written length in "len" */ - if (len != NULL) { - *len = sizeof(header) + ntohl(header.totlistlen); - } + /* possibly store total written length in "len" */ + if (len != NULL) { + *len = sizeof(header) + ntohl(header.totlistlen); + } - return 0; + return 0; } -int list_restore_filedescriptor(list_t *restrict l, int fd, size_t *restrict len) { - struct list_dump_header_s header; - unsigned long cnt; - void *buf; - uint32_t elsize, totreadlen, totmemorylen; +int list_restore_filedescriptor(list_t *restrict l, int fd, size_t *restrict len) +{ + struct list_dump_header_s header; + unsigned long cnt; + void *buf; + uint32_t elsize, totreadlen, totmemorylen; - memset(& header, 0, sizeof(header)); + memset(&header, 0, sizeof(header)); - /* read header */ + /* read header */ - /* version */ - READ_ERRCHECK(fd, &header.ver, sizeof(header.ver)); - header.ver = ntohs(header.ver); - if (header.ver != SIMCLIST_DUMPFORMAT_VERSION) { - errno = EILSEQ; - return -1; - } + /* version */ + READ_ERRCHECK(fd, &header.ver, sizeof(header.ver)); + header.ver = ntohs(header.ver); + if (header.ver != SIMCLIST_DUMPFORMAT_VERSION) { + errno = EILSEQ; + return -1; + } - /* timestamp */ - READ_ERRCHECK(fd, & header.timestamp_sec, sizeof(header.timestamp_sec)); - header.timestamp_sec = 
ntohl(header.timestamp_sec); - READ_ERRCHECK(fd, & header.timestamp_usec, sizeof(header.timestamp_usec)); - header.timestamp_usec = ntohl(header.timestamp_usec); + /* timestamp */ + READ_ERRCHECK(fd, &header.timestamp_sec, sizeof(header.timestamp_sec)); + header.timestamp_sec = ntohl(header.timestamp_sec); + READ_ERRCHECK(fd, &header.timestamp_usec, sizeof(header.timestamp_usec)); + header.timestamp_usec = ntohl(header.timestamp_usec); - /* list terminator */ - READ_ERRCHECK(fd, & header.rndterm, sizeof(header.rndterm)); + /* list terminator */ + READ_ERRCHECK(fd, &header.rndterm, sizeof(header.rndterm)); - header.rndterm = ntohl(header.rndterm); + header.rndterm = ntohl(header.rndterm); - /* total list size */ - READ_ERRCHECK(fd, & header.totlistlen, sizeof(header.totlistlen)); - header.totlistlen = ntohl(header.totlistlen); + /* total list size */ + READ_ERRCHECK(fd, &header.totlistlen, sizeof(header.totlistlen)); + header.totlistlen = ntohl(header.totlistlen); - /* number of elements */ - READ_ERRCHECK(fd, & header.numels, sizeof(header.numels)); - header.numels = ntohl(header.numels); + /* number of elements */ + READ_ERRCHECK(fd, &header.numels, sizeof(header.numels)); + header.numels = ntohl(header.numels); - /* length of every element, or '0' = variable */ - READ_ERRCHECK(fd, & header.elemlen, sizeof(header.elemlen)); - header.elemlen = ntohl(header.elemlen); + /* length of every element, or '0' = variable */ + READ_ERRCHECK(fd, &header.elemlen, sizeof(header.elemlen)); + header.elemlen = ntohl(header.elemlen); - /* list hash, or 0 = 'ignore' */ - READ_ERRCHECK(fd, & header.listhash, sizeof(header.listhash)); - header.listhash = ntohl(header.listhash); + /* list hash, or 0 = 'ignore' */ + READ_ERRCHECK(fd, &header.listhash, sizeof(header.listhash)); + header.listhash = ntohl(header.listhash); - /* read content */ - totreadlen = totmemorylen = 0; - if (header.elemlen > 0) { - /* elements have constant size = header.elemlen */ - if (l->attrs.unserializer != NULL) { - /* use unserializer */ - buf = malloc(header.elemlen); - for (cnt = 0; cnt < header.numels; cnt++) { - READ_ERRCHECK(fd, buf, header.elemlen); - list_append(l, l->attrs.unserializer(buf, & elsize)); - totmemorylen += elsize; - } - } else { - /* copy verbatim into memory */ - for (cnt = 0; cnt < header.numels; cnt++) { - buf = malloc(header.elemlen); - READ_ERRCHECK(fd, buf, header.elemlen); - list_append(l, buf); - } - totmemorylen = header.numels * header.elemlen; - } - totreadlen = header.numels * header.elemlen; - } else { - /* elements have variable size. 
Each element is preceded by its size */ - if (l->attrs.unserializer != NULL) { - /* use unserializer */ - for (cnt = 0; cnt < header.numels; cnt++) { - READ_ERRCHECK(fd, & elsize, sizeof(elsize)); - buf = malloc((size_t)elsize); - READ_ERRCHECK(fd, buf, elsize); - totreadlen += elsize; - list_append(l, l->attrs.unserializer(buf, & elsize)); - totmemorylen += elsize; - } - } else { - /* copy verbatim into memory */ - for (cnt = 0; cnt < header.numels; cnt++) { - READ_ERRCHECK(fd, & elsize, sizeof(elsize)); - buf = malloc(elsize); - READ_ERRCHECK(fd, buf, elsize); - totreadlen += elsize; - list_append(l, buf); - } - totmemorylen = totreadlen; - } - } + /* read content */ + totreadlen = totmemorylen = 0; + if (header.elemlen > 0) { + /* elements have constant size = header.elemlen */ + if (l->attrs.unserializer != NULL) { + /* use unserializer */ + buf = malloc(header.elemlen); + for (cnt = 0; cnt < header.numels; cnt++) { + READ_ERRCHECK(fd, buf, header.elemlen); + list_append(l, l->attrs.unserializer(buf, &elsize)); + totmemorylen += elsize; + } + } else { + /* copy verbatim into memory */ + for (cnt = 0; cnt < header.numels; cnt++) { + buf = malloc(header.elemlen); + READ_ERRCHECK(fd, buf, header.elemlen); + list_append(l, buf); + } + totmemorylen = header.numels * header.elemlen; + } + totreadlen = header.numels * header.elemlen; + } else { + /* elements have variable size. Each element is preceded by its size */ + if (l->attrs.unserializer != NULL) { + /* use unserializer */ + for (cnt = 0; cnt < header.numels; cnt++) { + READ_ERRCHECK(fd, &elsize, sizeof(elsize)); + buf = malloc((size_t) elsize); + READ_ERRCHECK(fd, buf, elsize); + totreadlen += elsize; + list_append(l, l->attrs.unserializer(buf, &elsize)); + totmemorylen += elsize; + } + } else { + /* copy verbatim into memory */ + for (cnt = 0; cnt < header.numels; cnt++) { + READ_ERRCHECK(fd, &elsize, sizeof(elsize)); + buf = malloc(elsize); + READ_ERRCHECK(fd, buf, elsize); + totreadlen += elsize; + list_append(l, buf); + } + totmemorylen = totreadlen; + } + } - READ_ERRCHECK(fd, &elsize, sizeof(elsize)); /* read list terminator */ - elsize = ntohl(elsize); + READ_ERRCHECK(fd, &elsize, sizeof(elsize)); /* read list terminator */ + elsize = ntohl(elsize); - /* possibly verify the list consistency */ - /* wrt hash */ - /* don't do that + /* possibly verify the list consistency */ + /* wrt hash */ + /* don't do that if (header.listhash != 0 && header.listhash != list_hash(l)) { errno = ECANCELED; return -1; } - */ + */ - /* wrt header */ - if (totreadlen != header.totlistlen && (int32_t)elsize == header.rndterm) { - errno = EPROTO; - return -1; - } + /* wrt header */ + if (totreadlen != header.totlistlen && (int32_t) elsize == header.rndterm) { + errno = EPROTO; + return -1; + } - /* wrt file */ - if (lseek(fd, 0, SEEK_CUR) != lseek(fd, 0, SEEK_END)) { - errno = EPROTO; - return -1; - } + /* wrt file */ + if (lseek(fd, 0, SEEK_CUR) != lseek(fd, 0, SEEK_END)) { + errno = EPROTO; + return -1; + } - if (len != NULL) { - *len = totmemorylen; - } + if (len != NULL) { + *len = totmemorylen; + } - return 0; + return 0; } -int list_dump_file(const list_t *restrict l, const char *restrict filename, size_t *restrict len) { - int fd, oflag, mode; +int list_dump_file(const list_t *restrict l, const char *restrict filename, size_t *restrict len) +{ + int fd, oflag, mode; #ifndef _WIN32 - oflag = O_RDWR | O_CREAT | O_TRUNC; - mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + oflag = O_RDWR | O_CREAT | O_TRUNC; + mode = S_IRUSR | S_IWUSR | S_IRGRP | 
S_IROTH; #else - oflag = _O_RDWR | _O_CREAT | _O_TRUNC; - mode = _S_IRUSR | _S_IWUSR | _S_IRGRP | _S_IROTH; + oflag = _O_RDWR | _O_CREAT | _O_TRUNC; + mode = _S_IRUSR | _S_IWUSR | _S_IRGRP | _S_IROTH; #endif - fd = open(filename, oflag, mode); - if (fd < 0) return -1; + fd = open(filename, oflag, mode); + if (fd < 0) + return -1; - list_dump_filedescriptor(l, fd, len); - close(fd); + list_dump_filedescriptor(l, fd, len); + close(fd); - return 0; + return 0; } -int list_restore_file(list_t *restrict l, const char *restrict filename, size_t *restrict len) { - int fd; +int list_restore_file(list_t *restrict l, const char *restrict filename, size_t *restrict len) +{ + int fd; - fd = open(filename, O_RDONLY, 0); - if (fd < 0) return -1; + fd = open(filename, O_RDONLY, 0); + if (fd < 0) + return -1; - list_restore_filedescriptor(l, fd, len); - close(fd); + list_restore_filedescriptor(l, fd, len); + close(fd); - return 0; + return 0; } #endif /* ifndef SIMCLIST_NO_DUMPRESTORE */ -static int list_drop_elem(list_t *restrict l, struct list_entry_s *tmp, unsigned int pos) { - if (tmp == NULL) return -1; +static int list_drop_elem(list_t *restrict l, struct list_entry_s *tmp, unsigned int pos) +{ + if (tmp == NULL) + return -1; - /* fix mid pointer. This is wrt the PRE situation */ - if (l->numels % 2) { /* now odd */ - /* sort out the base case by hand */ - if (l->numels == 1) l->mid = NULL; - else if (pos >= l->numels/2) l->mid = l->mid->prev; - } else { /* now even */ - if (pos < l->numels/2) l->mid = l->mid->next; - } + /* fix mid pointer. This is wrt the PRE situation */ + if (l->numels % 2) { /* now odd */ + /* sort out the base case by hand */ + if (l->numels == 1) + l->mid = NULL; + else if (pos >= l->numels / 2) + l->mid = l->mid->prev; + } else { /* now even */ + if (pos < l->numels / 2) + l->mid = l->mid->next; + } - tmp->prev->next = tmp->next; - tmp->next->prev = tmp->prev; + tmp->prev->next = tmp->next; + tmp->next->prev = tmp->prev; - /* free what's to be freed */ - if (l->attrs.copy_data && tmp->data != NULL) - free(tmp->data); + /* free what's to be freed */ + if (l->attrs.copy_data && tmp->data != NULL) + free(tmp->data); - if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) { - l->spareels[l->spareelsnum++] = tmp; - } else { - free(tmp); - } + if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) { + l->spareels[l->spareelsnum++] = tmp; + } else { + free(tmp); + } - return 0; + return 0; } /* ready-made comparators and meters */ #define SIMCLIST_NUMBER_COMPARATOR(type) int list_comparator_##type(const void *a, const void *b) { return( *(type *)a < *(type *)b) - (*(type *)a > *(type *)b); } SIMCLIST_NUMBER_COMPARATOR(int8_t) -SIMCLIST_NUMBER_COMPARATOR(int16_t) -SIMCLIST_NUMBER_COMPARATOR(int32_t) -SIMCLIST_NUMBER_COMPARATOR(int64_t) +SIMCLIST_NUMBER_COMPARATOR(int16_t) SIMCLIST_NUMBER_COMPARATOR(int32_t) SIMCLIST_NUMBER_COMPARATOR(int64_t) + SIMCLIST_NUMBER_COMPARATOR(uint8_t) SIMCLIST_NUMBER_COMPARATOR(uint16_t) SIMCLIST_NUMBER_COMPARATOR(uint32_t) SIMCLIST_NUMBER_COMPARATOR(uint64_t) + SIMCLIST_NUMBER_COMPARATOR(float) SIMCLIST_NUMBER_COMPARATOR(double) -SIMCLIST_NUMBER_COMPARATOR(uint8_t) -SIMCLIST_NUMBER_COMPARATOR(uint16_t) -SIMCLIST_NUMBER_COMPARATOR(uint32_t) -SIMCLIST_NUMBER_COMPARATOR(uint64_t) - -SIMCLIST_NUMBER_COMPARATOR(float) -SIMCLIST_NUMBER_COMPARATOR(double) - -int list_comparator_string(const void *a, const void *b) { return strcmp((const char *)b, (const char *)a); } + int list_comparator_string(const void *a, const void *b) +{ + return strcmp((const char *) b, (const char *) 
a); +} /* ready-made metric functions */ #define SIMCLIST_METER(type) size_t list_meter_##type(const void *el) { if (el) { /* kill compiler whinge */ } return sizeof(type); } -SIMCLIST_METER(int8_t) -SIMCLIST_METER(int16_t) -SIMCLIST_METER(int32_t) -SIMCLIST_METER(int64_t) +SIMCLIST_METER(int8_t) SIMCLIST_METER(int16_t) SIMCLIST_METER(int32_t) SIMCLIST_METER(int64_t) + SIMCLIST_METER(uint8_t) SIMCLIST_METER(uint16_t) SIMCLIST_METER(uint32_t) SIMCLIST_METER(uint64_t) + SIMCLIST_METER(float) SIMCLIST_METER(double) -SIMCLIST_METER(uint8_t) -SIMCLIST_METER(uint16_t) -SIMCLIST_METER(uint32_t) -SIMCLIST_METER(uint64_t) - -SIMCLIST_METER(float) -SIMCLIST_METER(double) - -size_t list_meter_string(const void *el) { return strlen((const char *)el) + 1; } + size_t list_meter_string(const void *el) +{ + return strlen((const char *) el) + 1; +} /* ready-made hashing functions */ #define SIMCLIST_HASHCOMPUTER(type) list_hash_t list_hashcomputer_##type(const void *el) { return (list_hash_t)(*(type *)el); } -SIMCLIST_HASHCOMPUTER(int8_t) -SIMCLIST_HASHCOMPUTER(int16_t) -SIMCLIST_HASHCOMPUTER(int32_t) -SIMCLIST_HASHCOMPUTER(int64_t) +SIMCLIST_HASHCOMPUTER(int8_t) SIMCLIST_HASHCOMPUTER(int16_t) SIMCLIST_HASHCOMPUTER(int32_t) SIMCLIST_HASHCOMPUTER(int64_t) + SIMCLIST_HASHCOMPUTER(uint8_t) SIMCLIST_HASHCOMPUTER(uint16_t) SIMCLIST_HASHCOMPUTER(uint32_t) SIMCLIST_HASHCOMPUTER(uint64_t) + SIMCLIST_HASHCOMPUTER(float) SIMCLIST_HASHCOMPUTER(double) -SIMCLIST_HASHCOMPUTER(uint8_t) -SIMCLIST_HASHCOMPUTER(uint16_t) -SIMCLIST_HASHCOMPUTER(uint32_t) -SIMCLIST_HASHCOMPUTER(uint64_t) + list_hash_t list_hashcomputer_string(const void *el) +{ + size_t l; + list_hash_t hash = 123; + const char *str = (const char *) el; + char plus; -SIMCLIST_HASHCOMPUTER(float) -SIMCLIST_HASHCOMPUTER(double) + for (l = 0; str[l] != '\0'; l++) { + if (l) + plus = (char)(hash ^ str[l]); + else + plus = (char)(hash ^ (str[l] - str[0])); + hash += (plus << (CHAR_BIT * (l % sizeof(list_hash_t)))); + } -list_hash_t list_hashcomputer_string(const void *el) { - size_t l; - list_hash_t hash = 123; - const char *str = (const char *)el; - char plus; - - for (l = 0; str[l] != '\0'; l++) { - if (l) plus = hash ^ str[l]; - else plus = hash ^ (str[l] - str[0]); - hash += (plus << (CHAR_BIT * (l % sizeof(list_hash_t)))); - } - - return hash; + return hash; } #ifndef NDEBUG -static int list_repOk(const list_t *restrict l) { - int ok, i; - struct list_entry_s *s; +static int list_repOk(const list_t *restrict l) +{ + int ok, i; + struct list_entry_s *s; - ok = (l != NULL) && ( - /* head/tail checks */ - (l->head_sentinel != NULL && l->tail_sentinel != NULL) && - (l->head_sentinel != l->tail_sentinel) && (l->head_sentinel->prev == NULL && l->tail_sentinel->next == NULL) && - /* empty list */ - (l->numels > 0 || (l->mid == NULL && l->head_sentinel->next == l->tail_sentinel && l->tail_sentinel->prev == l->head_sentinel)) && - /* spare elements checks */ - l->spareelsnum <= SIMCLIST_MAX_SPARE_ELEMS - ); + ok = (l != NULL) && ( + /* head/tail checks */ + (l->head_sentinel != NULL && l->tail_sentinel != NULL) && + (l->head_sentinel != l->tail_sentinel) && (l->head_sentinel->prev == NULL && l->tail_sentinel->next == NULL) && + /* empty list */ + (l->numels > 0 || (l->mid == NULL && l->head_sentinel->next == l->tail_sentinel && l->tail_sentinel->prev == l->head_sentinel)) + && + /* spare elements checks */ + l->spareelsnum <= SIMCLIST_MAX_SPARE_ELEMS); - if (!ok) return 0; + if (!ok) + return 0; - if (l->numels >= 1) { - /* correct referencing */ - for (i = -1, s = 
l->head_sentinel; i < (int)(l->numels-1)/2 && s->next != NULL; i++, s = s->next) { - if (s->next->prev != s) break; - } - ok = (i == (int)(l->numels-1)/2 && l->mid == s); - if (!ok) return 0; - for (; s->next != NULL; i++, s = s->next) { - if (s->next->prev != s) break; - } - ok = (i == (int)l->numels && s == l->tail_sentinel); - } + if (l->numels >= 1) { + /* correct referencing */ + for (i = -1, s = l->head_sentinel; i < (int) (l->numels - 1) / 2 && s->next != NULL; i++, s = s->next) { + if (s->next->prev != s) + break; + } + ok = (i == (int) (l->numels - 1) / 2 && l->mid == s); + if (!ok) + return 0; + for (; s->next != NULL; i++, s = s->next) { + if (s->next->prev != s) + break; + } + ok = (i == (int) l->numels && s == l->tail_sentinel); + } - return ok; + return ok; } -static int list_attrOk(const list_t *restrict l) { - int ok; +static int list_attrOk(const list_t *restrict l) +{ + int ok; - ok = (l->attrs.copy_data == 0 || l->attrs.meter != NULL); - return ok; + ok = (l->attrs.copy_data == 0 || l->attrs.meter != NULL); + return ok; } #endif diff --git a/libs/libks/src/utp/utp.h b/libs/libks/src/utp/utp.h new file mode 100644 index 0000000000..9d4ed40cda --- /dev/null +++ b/libs/libks/src/utp/utp.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef __UTP_H__ +#define __UTP_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "utp_types.h" + +typedef struct UTPSocket utp_socket; +typedef struct struct_utp_context utp_context; + +enum { + UTP_UDP_DONTFRAG = 2, // Used to be a #define as UDP_IP_DONTFRAG +}; + +enum { + // socket has reveived syn-ack (notification only for outgoing connection completion) + // this implies writability + UTP_STATE_CONNECT = 1, + + // socket is able to send more data + UTP_STATE_WRITABLE = 2, + + // connection closed + UTP_STATE_EOF = 3, + + // socket is being destroyed, meaning all data has been sent if possible. 
+	// it is not valid to refer to the socket after this state change occurs
+	UTP_STATE_DESTROYING = 4,
+};
+
+extern const char *utp_state_names[];
+
+// Error codes that can be passed to UTP_ON_ERROR callback
+enum {
+	UTP_ECONNREFUSED = 0,
+	UTP_ECONNRESET,
+	UTP_ETIMEDOUT,
+};
+
+extern const char *utp_error_code_names[];
+
+enum {
+	// callback names
+	UTP_ON_FIREWALL = 0,
+	UTP_ON_ACCEPT,
+	UTP_ON_CONNECT,
+	UTP_ON_ERROR,
+	UTP_ON_READ,
+	UTP_ON_OVERHEAD_STATISTICS,
+	UTP_ON_STATE_CHANGE,
+	UTP_GET_READ_BUFFER_SIZE,
+	UTP_ON_DELAY_SAMPLE,
+	UTP_GET_UDP_MTU,
+	UTP_GET_UDP_OVERHEAD,
+	UTP_GET_MILLISECONDS,
+	UTP_GET_MICROSECONDS,
+	UTP_GET_RANDOM,
+	UTP_LOG,
+	UTP_SENDTO,
+
+	// context and socket options that may be set/queried
+	UTP_LOG_NORMAL,
+	UTP_LOG_MTU,
+	UTP_LOG_DEBUG,
+	UTP_SNDBUF,
+	UTP_RCVBUF,
+	UTP_TARGET_DELAY,
+
+	UTP_ARRAY_SIZE,	// must be last
+};
+
+extern const char *utp_callback_names[];
+
+typedef struct {
+	utp_context *context;
+	utp_socket *socket;
+	size_t len;
+	uint32 flags;
+	int callback_type;
+	const byte *buf;
+
+	union {
+		const struct sockaddr *address;
+		int send;
+		int sample_ms;
+		int error_code;
+		int state;
+	};
+
+	union {
+		socklen_t address_len;
+		int type;
+	};
+} utp_callback_arguments;
+
+typedef uint64 utp_callback_t(utp_callback_arguments *);
+
+// Returned by utp_get_context_stats()
+typedef struct {
+	uint32 _nraw_recv[5];	// total packets received less than 300/600/1200/MTU bytes for all connections (context-wide)
+	uint32 _nraw_send[5];	// total packets sent less than 300/600/1200/MTU bytes for all connections (context-wide)
+} utp_context_stats;
+
+// Returned by utp_get_stats()
+typedef struct {
+	uint64 nbytes_recv;	// total bytes received
+	uint64 nbytes_xmit;	// total bytes transmitted
+	uint32 rexmit;		// retransmit counter
+	uint32 fastrexmit;	// fast retransmit counter
+	uint32 nxmit;		// transmit counter
+	uint32 nrecv;		// receive counter (total)
+	uint32 nduprecv;	// duplicate receive counter
+	uint32 mtu_guess;	// Best guess at MTU
+} utp_socket_stats;
+
+#define UTP_IOV_MAX 1024
+
+// For utp_writev, to write data from multiple buffers
+struct utp_iovec {
+	void *iov_base;
+	size_t iov_len;
+};
+
+// Public Functions
+utp_context* utp_init (int version);
+void utp_destroy (utp_context *ctx);
+void utp_set_callback (utp_context *ctx, int callback_name, utp_callback_t *proc);
+void* utp_context_set_userdata (utp_context *ctx, void *userdata);
+void* utp_context_get_userdata (utp_context *ctx);
+int utp_context_set_option (utp_context *ctx, int opt, int val);
+int utp_context_get_option (utp_context *ctx, int opt);
+int utp_process_udp (utp_context *ctx, const byte *buf, size_t len, const struct sockaddr *to, socklen_t tolen);
+int utp_process_icmp_error (utp_context *ctx, const byte *buffer, size_t len, const struct sockaddr *to, socklen_t tolen);
+int utp_process_icmp_fragmentation (utp_context *ctx, const byte *buffer, size_t len, const struct sockaddr *to, socklen_t tolen, uint16 next_hop_mtu);
+void utp_check_timeouts (utp_context *ctx);
+void utp_issue_deferred_acks (utp_context *ctx);
+utp_context_stats* utp_get_context_stats (utp_context *ctx);
+utp_socket* utp_create_socket (utp_context *ctx);
+void* utp_set_userdata (utp_socket *s, void *userdata);
+void* utp_get_userdata (utp_socket *s);
+int utp_setsockopt (utp_socket *s, int opt, int val);
+int utp_getsockopt (utp_socket *s, int opt);
+int utp_connect (utp_socket *s, const struct sockaddr *to, socklen_t tolen);
+ssize_t utp_write (utp_socket *s, void *buf, size_t 
count); +ssize_t utp_writev (utp_socket *s, struct utp_iovec *iovec, size_t num_iovecs); +int utp_getpeername (utp_socket *s, struct sockaddr *addr, socklen_t *addrlen); +void utp_read_drained (utp_socket *s); +int utp_get_delays (utp_socket *s, uint32 *ours, uint32 *theirs, uint32 *age); +utp_socket_stats* utp_get_stats (utp_socket *s); +utp_context* utp_get_context (utp_socket *s); +void utp_close (utp_socket *s); + +#ifdef __cplusplus +} +#endif + +#endif //__UTP_H__ diff --git a/libs/libks/src/utp/utp_api.cpp b/libs/libks/src/utp/utp_api.cpp new file mode 100644 index 0000000000..63aff189c0 --- /dev/null +++ b/libs/libks/src/utp/utp_api.cpp @@ -0,0 +1,139 @@ +// vim:set ts=4 sw=4 ai: + +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include "utp_internal.h" +#include "utp_utils.h" + +extern "C" { + +const char * utp_callback_names[] = { + "UTP_ON_FIREWALL", + "UTP_ON_ACCEPT", + "UTP_ON_CONNECT", + "UTP_ON_ERROR", + "UTP_ON_READ", + "UTP_ON_OVERHEAD_STATISTICS", + "UTP_ON_STATE_CHANGE", + "UTP_GET_READ_BUFFER_SIZE", + "UTP_ON_DELAY_SAMPLE", + "UTP_GET_UDP_MTU", + "UTP_GET_UDP_OVERHEAD", + "UTP_GET_MILLISECONDS", + "UTP_GET_MICROSECONDS", + "UTP_GET_RANDOM", + "UTP_LOG", + "UTP_SENDTO", +}; + +const char * utp_error_code_names[] = { + "UTP_ECONNREFUSED", + "UTP_ECONNRESET", + "UTP_ETIMEDOUT", +}; + +const char *utp_state_names[] = { + NULL, + "UTP_STATE_CONNECT", + "UTP_STATE_WRITABLE", + "UTP_STATE_EOF", + "UTP_STATE_DESTROYING", +}; + +struct_utp_context::struct_utp_context() + : userdata(NULL) + , current_ms(0) + , last_utp_socket(NULL) + , log_normal(false) + , log_mtu(false) + , log_debug(false) +{ + memset(&context_stats, 0, sizeof(context_stats)); + memset(callbacks, 0, sizeof(callbacks)); + target_delay = CCONTROL_TARGET; + utp_sockets = new UTPSocketHT; + + callbacks[UTP_GET_UDP_MTU] = &utp_default_get_udp_mtu; + callbacks[UTP_GET_UDP_OVERHEAD] = &utp_default_get_udp_overhead; + callbacks[UTP_GET_MILLISECONDS] = &utp_default_get_milliseconds; + callbacks[UTP_GET_MICROSECONDS] = &utp_default_get_microseconds; + callbacks[UTP_GET_RANDOM] = &utp_default_get_random; + + // 1 MB of receive buffer (i.e. max bandwidth delay product) + // means that from a peer with 200 ms RTT, we cannot receive + // faster than 5 MB/s + // from a peer with 10 ms RTT, we cannot receive faster than + // 100 MB/s. 
This is assumed to be good enough, since bandwidth + // often is proportional to RTT anyway + // when setting a download rate limit, all sockets should have + // their receive buffer set much lower, to say 60 kiB or so + opt_rcvbuf = opt_sndbuf = 1024 * 1024; + last_check = 0; +} + +struct_utp_context::~struct_utp_context() { + delete this->utp_sockets; +} + +utp_context* utp_init (int version) +{ + assert(version == 2); + if (version != 2) + return NULL; + utp_context *ctx = new utp_context; + return ctx; +} + +void utp_destroy(utp_context *ctx) { + assert(ctx); + if (ctx) delete ctx; +} + +void utp_set_callback(utp_context *ctx, int callback_name, utp_callback_t *proc) { + assert(ctx); + if (ctx) ctx->callbacks[callback_name] = proc; +} + +void* utp_context_set_userdata(utp_context *ctx, void *userdata) { + assert(ctx); + if (ctx) ctx->userdata = userdata; + return ctx ? ctx->userdata : NULL; +} + +void* utp_context_get_userdata(utp_context *ctx) { + assert(ctx); + return ctx ? ctx->userdata : NULL; +} + +utp_context_stats* utp_get_context_stats(utp_context *ctx) { + assert(ctx); + return ctx ? &ctx->context_stats : NULL; +} + +ssize_t utp_write(utp_socket *socket, void *buf, size_t len) { + struct utp_iovec iovec = { buf, len }; + return utp_writev(socket, &iovec, 1); +} + +} diff --git a/libs/libks/src/utp/utp_callbacks.cpp b/libs/libks/src/utp/utp_callbacks.cpp new file mode 100644 index 0000000000..9540d8c409 --- /dev/null +++ b/libs/libks/src/utp/utp_callbacks.cpp @@ -0,0 +1,208 @@ +// vim:set ts=4 sw=4 ai: + +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "utp_callbacks.h" + +int utp_call_on_firewall(utp_context *ctx, const struct sockaddr *address, socklen_t address_len) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_FIREWALL]) return 0; + args.callback_type = UTP_ON_FIREWALL; + args.context = ctx; + args.socket = NULL; + args.address = address; + args.address_len = address_len; + return (int)ctx->callbacks[UTP_ON_FIREWALL](&args); +} + +void utp_call_on_accept(utp_context *ctx, utp_socket *socket, const struct sockaddr *address, socklen_t address_len) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_ACCEPT]) return; + args.callback_type = UTP_ON_ACCEPT; + args.context = ctx; + args.socket = socket; + args.address = address; + args.address_len = address_len; + ctx->callbacks[UTP_ON_ACCEPT](&args); +} + +void utp_call_on_connect(utp_context *ctx, utp_socket *socket) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_CONNECT]) return; + args.callback_type = UTP_ON_CONNECT; + args.context = ctx; + args.socket = socket; + ctx->callbacks[UTP_ON_CONNECT](&args); +} + +void utp_call_on_error(utp_context *ctx, utp_socket *socket, int error_code) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_ERROR]) return; + args.callback_type = UTP_ON_ERROR; + args.context = ctx; + args.socket = socket; + args.error_code = error_code; + ctx->callbacks[UTP_ON_ERROR](&args); +} + +void utp_call_on_read(utp_context *ctx, utp_socket *socket, const byte *buf, size_t len) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_READ]) return; + args.callback_type = UTP_ON_READ; + args.context = ctx; + args.socket = socket; + args.buf = buf; + args.len = len; + ctx->callbacks[UTP_ON_READ](&args); +} + +void utp_call_on_overhead_statistics(utp_context *ctx, utp_socket *socket, int send, size_t len, int type) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_OVERHEAD_STATISTICS]) return; + args.callback_type = UTP_ON_OVERHEAD_STATISTICS; + args.context = ctx; + args.socket = socket; + args.send = send; + args.len = len; + args.type = type; + ctx->callbacks[UTP_ON_OVERHEAD_STATISTICS](&args); +} + +void utp_call_on_delay_sample(utp_context *ctx, utp_socket *socket, int sample_ms) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_DELAY_SAMPLE]) return; + args.callback_type = UTP_ON_DELAY_SAMPLE; + args.context = ctx; + args.socket = socket; + args.sample_ms = sample_ms; + ctx->callbacks[UTP_ON_DELAY_SAMPLE](&args); +} + +void utp_call_on_state_change(utp_context *ctx, utp_socket *socket, int state) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_ON_STATE_CHANGE]) return; + args.callback_type = UTP_ON_STATE_CHANGE; + args.context = ctx; + args.socket = socket; + args.state = state; + ctx->callbacks[UTP_ON_STATE_CHANGE](&args); +} + +uint16 utp_call_get_udp_mtu(utp_context *ctx, utp_socket *socket, const struct sockaddr *address, socklen_t address_len) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_GET_UDP_MTU]) return 0; + args.callback_type = UTP_GET_UDP_MTU; + args.context = ctx; + args.socket = socket; + args.address = address; + args.address_len = address_len; + return (uint16)ctx->callbacks[UTP_GET_UDP_MTU](&args); +} + +uint16 utp_call_get_udp_overhead(utp_context *ctx, utp_socket *socket, const struct sockaddr *address, socklen_t address_len) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_GET_UDP_OVERHEAD]) return 0; + args.callback_type = UTP_GET_UDP_OVERHEAD; + args.context = ctx; + args.socket = socket; + args.address = 
address; + args.address_len = address_len; + return (uint16)ctx->callbacks[UTP_GET_UDP_OVERHEAD](&args); +} + +uint64 utp_call_get_milliseconds(utp_context *ctx, utp_socket *socket) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_GET_MILLISECONDS]) return 0; + args.callback_type = UTP_GET_MILLISECONDS; + args.context = ctx; + args.socket = socket; + return ctx->callbacks[UTP_GET_MILLISECONDS](&args); +} + +uint64 utp_call_get_microseconds(utp_context *ctx, utp_socket *socket) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_GET_MICROSECONDS]) return 0; + args.callback_type = UTP_GET_MICROSECONDS; + args.context = ctx; + args.socket = socket; + return ctx->callbacks[UTP_GET_MICROSECONDS](&args); +} + +uint32 utp_call_get_random(utp_context *ctx, utp_socket *socket) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_GET_RANDOM]) return 0; + args.callback_type = UTP_GET_RANDOM; + args.context = ctx; + args.socket = socket; + return (uint32)ctx->callbacks[UTP_GET_RANDOM](&args); +} + +size_t utp_call_get_read_buffer_size(utp_context *ctx, utp_socket *socket) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_GET_READ_BUFFER_SIZE]) return 0; + args.callback_type = UTP_GET_READ_BUFFER_SIZE; + args.context = ctx; + args.socket = socket; + return (size_t)ctx->callbacks[UTP_GET_READ_BUFFER_SIZE](&args); +} + +void utp_call_log(utp_context *ctx, utp_socket *socket, const byte *buf) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_LOG]) return; + args.callback_type = UTP_LOG; + args.context = ctx; + args.socket = socket; + args.buf = buf; + ctx->callbacks[UTP_LOG](&args); +} + +void utp_call_sendto(utp_context *ctx, utp_socket *socket, const byte *buf, size_t len, const struct sockaddr *address, socklen_t address_len, uint32 flags) +{ + utp_callback_arguments args; + if (!ctx->callbacks[UTP_SENDTO]) return; + args.callback_type = UTP_SENDTO; + args.context = ctx; + args.socket = socket; + args.buf = buf; + args.len = len; + args.address = address; + args.address_len = address_len; + args.flags = flags; + ctx->callbacks[UTP_SENDTO](&args); +} + diff --git a/libs/libks/src/utp/utp_callbacks.h b/libs/libks/src/utp/utp_callbacks.h new file mode 100644 index 0000000000..649e7e14ff --- /dev/null +++ b/libs/libks/src/utp/utp_callbacks.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef __UTP_CALLBACKS_H__ +#define __UTP_CALLBACKS_H__ + +#include "utp.h" +#include "utp_internal.h" + +// Generated by running: grep ^[a-z] utp_callbacks.cpp | sed 's/$/;/' +int utp_call_on_firewall(utp_context *ctx, const struct sockaddr *address, socklen_t address_len); +void utp_call_on_accept(utp_context *ctx, utp_socket *s, const struct sockaddr *address, socklen_t address_len); +void utp_call_on_connect(utp_context *ctx, utp_socket *s); +void utp_call_on_error(utp_context *ctx, utp_socket *s, int error_code); +void utp_call_on_read(utp_context *ctx, utp_socket *s, const byte *buf, size_t len); +void utp_call_on_overhead_statistics(utp_context *ctx, utp_socket *s, int send, size_t len, int type); +void utp_call_on_delay_sample(utp_context *ctx, utp_socket *s, int sample_ms); +void utp_call_on_state_change(utp_context *ctx, utp_socket *s, int state); +uint16 utp_call_get_udp_mtu(utp_context *ctx, utp_socket *s, const struct sockaddr *address, socklen_t address_len); +uint16 utp_call_get_udp_overhead(utp_context *ctx, utp_socket *s, const struct sockaddr *address, socklen_t address_len); +uint64 utp_call_get_milliseconds(utp_context *ctx, utp_socket *s); +uint64 utp_call_get_microseconds(utp_context *ctx, utp_socket *s); +uint32 utp_call_get_random(utp_context *ctx, utp_socket *s); +size_t utp_call_get_read_buffer_size(utp_context *ctx, utp_socket *s); +void utp_call_log(utp_context *ctx, utp_socket *s, const byte *buf); +void utp_call_sendto(utp_context *ctx, utp_socket *s, const byte *buf, size_t len, const struct sockaddr *address, socklen_t address_len, uint32 flags); + +#endif // __UTP_CALLBACKS_H__ diff --git a/libs/libks/src/utp/utp_hash.cpp b/libs/libks/src/utp/utp_hash.cpp new file mode 100644 index 0000000000..a4a71d9068 --- /dev/null +++ b/libs/libks/src/utp/utp_hash.cpp @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "utp_hash.h" +#include "utp_types.h" + +#define LIBUTP_HASH_UNUSED ((utp_link_t)-1) + +#ifdef STRICT_ALIGN +inline uint32 Read32(const void *p) +{ + uint32 tmp; + memcpy(&tmp, p, sizeof tmp); + return tmp; +} + +#else +inline uint32 Read32(const void *p) { return *(uint32*)p; } +#endif + + +// Get the amount of memory required for the hash parameters and the bucket set +// Waste a space for an unused bucket in order to ensure the following managed memory have 32-bit aligned addresses +// TODO: make this 64-bit clean +#define BASE_SIZE(bc) (sizeof(utp_hash_t) + sizeof(utp_link_t) * ((bc) + 1)) + +// Get a pointer to the base of the structure array managed by the hash table +#define get_bep(h) ((byte*)(h)) + BASE_SIZE((h)->N) + +// Get the address of the information associated with a specific structure in the array, +// given the address of the base of the structure. +// This assumes a utp_link_t link member is at the end of the structure. +// Given compilers filling out the memory to a 32-bit clean value, this may mean that +// the location named in the structure may not be the location actually used by the hash table, +// since the compiler may have padded the end of the structure with 2 bytes after the utp_link_t member. +// TODO: this macro should not require that the variable pointing at the hash table be named 'hash' +#define ptr_to_link(p) (utp_link_t *) (((byte *) (p)) + hash->E - sizeof(utp_link_t)) + +// Calculate how much to allocate for a hash table with bucket count, total size, and structure count +// TODO: make this 64-bit clean +#define ALLOCATION_SIZE(bc, ts, sc) (BASE_SIZE((bc)) + (ts) * (sc)) + +utp_hash_t *utp_hash_create(int N, int key_size, int total_size, int initial, utp_hash_compute_t hashfun, utp_hash_equal_t compfun) +{ + // Must have odd number of hash buckets (prime number is best) + assert(N % 2); + // Ensure structures will be at aligned memory addresses + // TODO: make this 64-bit clean + assert(0 == (total_size % 4)); + + int size = ALLOCATION_SIZE(N, total_size, initial); + utp_hash_t *hash = (utp_hash_t *) malloc( size ); + memset( hash, 0, size ); + + for (int i = 0; i < N + 1; ++i) + hash->inits[i] = LIBUTP_HASH_UNUSED; + hash->N = N; + hash->K = key_size; + hash->E = total_size; + hash->hash_compute = hashfun; + hash->hash_equal = compfun; + hash->allocated = initial; + hash->count = 0; + hash->used = 0; + hash->free = LIBUTP_HASH_UNUSED; + return hash; +} + +uint utp_hash_mem(const void *keyp, size_t keysize) +{ + uint hash = 0; + uint n = keysize; + while (n >= 4) { + hash ^= Read32(keyp); + keyp = (byte*)keyp + sizeof(uint32); + hash = (hash << 13) | (hash >> 19); + n -= 4; + } + while (n != 0) { + hash ^= *(byte*)keyp; + keyp = (byte*)keyp + sizeof(byte); + hash = (hash << 8) | (hash >> 24); + n--; + } + return hash; +} + +uint utp_hash_mkidx(utp_hash_t *hash, const void *keyp) +{ + // Generate a key from the hash + return hash->hash_compute(keyp, hash->K) % hash->N; +} + +static inline bool compare(byte *a, byte *b,int n) +{ + assert(n >= 4); + if (Read32(a) != Read32(b)) return false; + return memcmp(a+4, b+4, n-4) == 0; +} + +#define COMPARE(h,k1,k2,ks) (((h)->hash_equal) ? (h)->hash_equal((void*)k1,(void*)k2,ks) : compare(k1,k2,ks)) + +// Look-up a key in the hash table. 
+// Returns NULL if not found +void *utp_hash_lookup(utp_hash_t *hash, const void *key) +{ + utp_link_t idx = utp_hash_mkidx(hash, key); + + // base pointer + byte *bep = get_bep(hash); + + utp_link_t cur = hash->inits[idx]; + while (cur != LIBUTP_HASH_UNUSED) { + byte *key2 = bep + (cur * hash->E); + if (COMPARE(hash, (byte*)key, key2, hash->K)) + return key2; + cur = *ptr_to_link(key2); + } + + return NULL; +} + +// Add a new element to the hash table. +// Returns a pointer to the new element. +// This assumes the element is not already present! +void *utp_hash_add(utp_hash_t **hashp, const void *key) +{ + //Allocate a new entry + byte *elemp; + utp_link_t elem; + utp_hash_t *hash = *hashp; + utp_link_t idx = utp_hash_mkidx(hash, key); + + if ((elem=hash->free) == LIBUTP_HASH_UNUSED) { + utp_link_t all = hash->allocated; + if (hash->used == all) { + utp_hash_t *nhash; + if (all <= (LIBUTP_HASH_UNUSED/2)) { + all *= 2; + } else if (all != LIBUTP_HASH_UNUSED) { + all = LIBUTP_HASH_UNUSED; + } else { + // too many items! can't grow! + assert(0); + return NULL; + } + // otherwise need to allocate. + nhash = (utp_hash_t*)realloc(hash, ALLOCATION_SIZE(hash->N, hash->E, all)); + if (!nhash) { + // out of memory (or too big to allocate) + assert(nhash); + return NULL; + } + hash = *hashp = nhash; + hash->allocated = all; + } + + elem = hash->used++; + elemp = get_bep(hash) + elem * hash->E; + } else { + elemp = get_bep(hash) + elem * hash->E; + hash->free = *ptr_to_link(elemp); + } + + *ptr_to_link(elemp) = hash->inits[idx]; + hash->inits[idx] = elem; + hash->count++; + + // copy key into it + memcpy(elemp, key, hash->K); + return elemp; +} + +// Delete an element from the utp_hash_t +// Returns a pointer to the already deleted element. +void *utp_hash_del(utp_hash_t *hash, const void *key) +{ + utp_link_t idx = utp_hash_mkidx(hash, key); + + // base pointer + byte *bep = get_bep(hash); + + utp_link_t *curp = &hash->inits[idx]; + utp_link_t cur; + while ((cur=*curp) != LIBUTP_HASH_UNUSED) { + byte *key2 = bep + (cur * hash->E); + if (COMPARE(hash,(byte*)key,(byte*)key2, hash->K )) { + // found an item that matched. unlink it + *curp = *ptr_to_link(key2); + // Insert into freelist + *ptr_to_link(key2) = hash->free; + hash->free = cur; + hash->count--; + return key2; + } + curp = ptr_to_link(key2); + } + + return NULL; +} + +void *utp_hash_iterate(utp_hash_t *hash, utp_hash_iterator_t *iter) +{ + utp_link_t elem; + + if ((elem=iter->elem) == LIBUTP_HASH_UNUSED) { + // Find a bucket with an element + utp_link_t buck = iter->bucket + 1; + for(;;) { + if (buck >= hash->N) + return NULL; + if ((elem = hash->inits[buck]) != LIBUTP_HASH_UNUSED) + break; + buck++; + } + iter->bucket = buck; + } + + byte *elemp = get_bep(hash) + (elem * hash->E); + iter->elem = *ptr_to_link(elemp); + return elemp; +} + +void utp_hash_free_mem(utp_hash_t* hash) +{ + free(hash); +} diff --git a/libs/libks/src/utp/utp_hash.h b/libs/libks/src/utp/utp_hash.h new file mode 100644 index 0000000000..72c17e3bde --- /dev/null +++ b/libs/libks/src/utp/utp_hash.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef __UTP_HASH_H__ +#define __UTP_HASH_H__ + +#include // memset +#include // malloc + +#include "utp_types.h" +#include "utp_templates.h" + +// TODO: make utp_link_t a template parameter to HashTable +typedef uint32 utp_link_t; + +#ifdef _MSC_VER +// Silence the warning about the C99-compliant zero-length array at the end of the structure +#pragma warning (disable: 4200) +#endif + +typedef uint32 (*utp_hash_compute_t)(const void *keyp, size_t keysize); +typedef uint (*utp_hash_equal_t)(const void *key_a, const void *key_b, size_t keysize); + +// In memory the HashTable is laid out as follows: +// ---------------------------- low +// | hash table data members | +// ---------------------------- _ +// | indices | ^ +// | . | | utp_link_t indices into the key-values. +// | . | . +// ---------------------------- - <----- bep +// | keys and values | each key-value pair has size total_size +// | . | +// | . | +// ---------------------------- high +// +// The code depends on the ability of the compiler to pad the length +// of the hash table data members structure to +// a length divisible by 32-bits with no remainder. +// +// Since the number of hash buckets (indices) should be odd, the code +// asserts this and adds one to the hash bucket count to ensure that the +// following key-value pairs array starts on a 32-bit boundary. +// +// The key-value pairs array should start on a 32-bit boundary, otherwise +// processors like the ARM will silently mangle 32-bit data in these structures +// (e.g., turning 0xABCD into 0XCDAB when moving a value from memory to register +// when the memory address is 16 bits offset from a 32-bit boundary), +// also, the value will be stored at an address two bytes lower than the address +// value would ordinarily indicate. +// +// The key-value pair is of type T. The first field in T must +// be the key, i.e., the first K bytes of T contains the key. +// total_size = sizeof(T) and thus sizeof(T) >= sizeof(K) +// +// N is the number of buckets. 
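
The layout rules above translate into a simple contract for callers: each stored entry must begin with its key and end with a utp_link_t that the table uses for chaining. The sketch below shows how a caller might drive the C-level functions declared further down (utp_hash_create / utp_hash_add / utp_hash_lookup / utp_hash_del); the example_key / example_entry types and the example() wrapper are hypothetical, introduced only for illustration, and assume utp_hash.h is included.

	#include "utp_hash.h"

	// Illustrative sketch only -- not part of this patch.
	struct example_key {
		uint32 conn_id;          // the key occupies the first key_size bytes of the entry
	};

	struct example_entry {
		example_key key;         // key must come first
		void *userdata;          // arbitrary payload
		utp_link_t link;         // trailing link consumed by the hash table itself
	};

	static void example()
	{
		// odd (ideally prime) bucket count, key size, entry size, initial slot count
		utp_hash_t *h = utp_hash_create(127, sizeof(example_key), sizeof(example_entry), 16);

		example_key k;
		k.conn_id = 42;

		// utp_hash_add() copies the key into a fresh slot; it may grow (realloc) the table,
		// which is why it takes utp_hash_t ** rather than utp_hash_t *
		example_entry *e = (example_entry *)utp_hash_add(&h, &k);
		e->userdata = NULL;

		// utp_hash_lookup() returns the stored entry, or NULL when the key is absent
		e = (example_entry *)utp_hash_lookup(h, &k);

		// utp_hash_del() unlinks the entry and puts its slot on the free list
		utp_hash_del(h, &k);

		utp_hash_free_mem(h);
	}
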
+// +struct utp_hash_t { + utp_link_t N; + byte K; + byte E; + size_t count; + utp_hash_compute_t hash_compute; + utp_hash_equal_t hash_equal; + utp_link_t allocated; + utp_link_t used; + utp_link_t free; + utp_link_t inits[0]; +}; + +#ifdef _MSC_VER +#pragma warning (default: 4200) +#endif + +struct utp_hash_iterator_t { + utp_link_t bucket; + utp_link_t elem; + + utp_hash_iterator_t() : bucket(0xffffffff), elem(0xffffffff) {} +}; + +uint utp_hash_mem(const void *keyp, size_t keysize); +uint utp_hash_comp(const void *key_a, const void *key_b, size_t keysize); + +utp_hash_t *utp_hash_create(int N, int key_size, int total_size, int initial, utp_hash_compute_t hashfun = utp_hash_mem, utp_hash_equal_t eqfun = NULL); +void *utp_hash_lookup(utp_hash_t *hash, const void *key); +void *utp_hash_add(utp_hash_t **hashp, const void *key); +void *utp_hash_del(utp_hash_t *hash, const void *key); + +void *utp_hash_iterate(utp_hash_t *hash, utp_hash_iterator_t *iter); +void utp_hash_free_mem(utp_hash_t *hash); + +/* + This HashTable requires that T have at least sizeof(K)+sizeof(utp_link_t) bytes. + Usually done like this: + + struct K { + int whatever; + }; + + struct T { + K wtf; + utp_link_t link; // also wtf + }; +*/ + +template class utpHashTable { + utp_hash_t *hash; +public: + static uint compare(const void *k1, const void *k2, size_t ks) { + return *((K*)k1) == *((K*)k2); + } + static uint32 compute_hash(const void *k, size_t ks) { + return ((K*)k)->compute_hash(); + } + void Init() { hash = NULL; } + bool Allocated() { return (hash != NULL); } + void Free() { utp_hash_free_mem(hash); hash = NULL; } + void Create(int N, int initial) { hash = utp_hash_create(N, sizeof(K), sizeof(T), initial, &compute_hash, &compare); } + T *Lookup(const K &key) { return (T*)utp_hash_lookup(hash, &key); } + T *Add(const K &key) { return (T*)utp_hash_add(&hash, &key); } + T *Delete(const K &key) { return (T*)utp_hash_del(hash, &key); } + T *Iterate(utp_hash_iterator_t &iterator) { return (T*)utp_hash_iterate(hash, &iterator); } + size_t GetCount() { return hash->count; } +}; + +#endif //__UTP_HASH_H__ diff --git a/libs/libks/src/utp/utp_internal.cpp b/libs/libks/src/utp/utp_internal.cpp new file mode 100644 index 0000000000..6e3e62737e --- /dev/null +++ b/libs/libks/src/utp/utp_internal.cpp @@ -0,0 +1,3413 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include // for UINT_MAX + +#include "utp_types.h" +#include "utp_packedsockaddr.h" +#include "utp_internal.h" +#include "utp_hash.h" + +#define TIMEOUT_CHECK_INTERVAL 500 + +// number of bytes to increase max window size by, per RTT. This is +// scaled down linearly proportional to off_target. i.e. if all packets +// in one window have 0 delay, window size will increase by this number. +// Typically it's less. TCP increases one MSS per RTT, which is 1500 +#define MAX_CWND_INCREASE_BYTES_PER_RTT 3000 +#define CUR_DELAY_SIZE 3 +// experiments suggest that a clock skew of 10 ms per 325 seconds +// is not impossible. Reset delay_base every 13 minutes. The clock +// skew is dealt with by observing the delay base in the other +// direction, and adjusting our own upwards if the opposite direction +// delay base keeps going down +#define DELAY_BASE_HISTORY 13 +#define MAX_WINDOW_DECAY 100 // ms + +#define REORDER_BUFFER_SIZE 32 +#define REORDER_BUFFER_MAX_SIZE 1024 +#define OUTGOING_BUFFER_MAX_SIZE 1024 + +#define PACKET_SIZE 1435 + +// this is the minimum max_window value. It can never drop below this +#define MIN_WINDOW_SIZE 10 + +// if we receive 4 or more duplicate acks, we resend the packet +// that hasn't been acked yet +#define DUPLICATE_ACKS_BEFORE_RESEND 3 + +// Allow a reception window of at least 3 ack_nrs behind seq_nr +// A non-SYN packet with an ack_nr difference greater than this is +// considered suspicious and ignored +#define ACK_NR_ALLOWED_WINDOW DUPLICATE_ACKS_BEFORE_RESEND + +#define RST_INFO_TIMEOUT 10000 +#define RST_INFO_LIMIT 1000 +// 29 seconds determined from measuring many home NAT devices +#define KEEPALIVE_INTERVAL 29000 + + +#define SEQ_NR_MASK 0xFFFF +#define ACK_NR_MASK 0xFFFF +#define TIMESTAMP_MASK 0xFFFFFFFF + +#define DIV_ROUND_UP(num, denom) ((num + denom - 1) / denom) + +// The totals are derived from the following data: +// 45: IPv6 address including embedded IPv4 address +// 11: Scope Id +// 2: Brackets around IPv6 address when port is present +// 6: Port (including colon) +// 1: Terminating null byte +char addrbuf[65]; +#define addrfmt(x, s) x.fmt(s, sizeof(s)) + + +#if (defined(__SVR4) && defined(__sun)) + #pragma pack(1) +#else + #pragma pack(push,1) +#endif + + +// these packet sizes are including the uTP header wich +// is either 20 or 23 bytes depending on version +#define PACKET_SIZE_EMPTY_BUCKET 0 +#define PACKET_SIZE_EMPTY 23 +#define PACKET_SIZE_SMALL_BUCKET 1 +#define PACKET_SIZE_SMALL 373 +#define PACKET_SIZE_MID_BUCKET 2 +#define PACKET_SIZE_MID 723 +#define PACKET_SIZE_BIG_BUCKET 3 +#define PACKET_SIZE_BIG 1400 +#define PACKET_SIZE_HUGE_BUCKET 4 + +struct PACKED_ATTRIBUTE PacketFormatV1 { + // packet_type (4 high bits) + // protocol version (4 low bits) + byte ver_type; + byte version() const { return ver_type & 0xf; } + byte type() const { return ver_type >> 4; } + void set_version(byte v) { ver_type = (ver_type & 0xf0) | (v & 0xf); } + void set_type(byte t) { ver_type = (ver_type & 0xf) | (t << 4); } + + // Type of the first extension header + byte ext; + // connection ID + uint16_big connid; + uint32_big tv_usec; + uint32_big reply_micro; + // receive window size in bytes + uint32_big windowsize; + // Sequence number + uint16_big seq_nr; + // Acknowledgment number + uint16_big ack_nr; +}; + +struct PACKED_ATTRIBUTE PacketFormatAckV1 { + PacketFormatV1 pf; + byte ext_next; + byte ext_len; + byte acks[4]; +}; + +#if (defined(__SVR4) && defined(__sun)) + #pragma pack(0) 
+#else + #pragma pack(pop) +#endif + +enum { + ST_DATA = 0, // Data packet. + ST_FIN = 1, // Finalize the connection. This is the last packet. + ST_STATE = 2, // State packet. Used to transmit an ACK with no data. + ST_RESET = 3, // Terminate connection forcefully. + ST_SYN = 4, // Connect SYN + ST_NUM_STATES, // used for bounds checking +}; + +static const cstr flagnames[] = { + "ST_DATA","ST_FIN","ST_STATE","ST_RESET","ST_SYN" +}; + +enum CONN_STATE { + CS_UNINITIALIZED = 0, + CS_IDLE, + CS_SYN_SENT, + CS_SYN_RECV, + CS_CONNECTED, + CS_CONNECTED_FULL, + CS_GOT_FIN, + CS_DESTROY_DELAY, + CS_FIN_SENT, + CS_RESET, + CS_DESTROY +}; + +static const cstr statenames[] = { + "UNINITIALIZED", "IDLE","SYN_SENT", "SYN_RECV", "CONNECTED","CONNECTED_FULL","GOT_FIN","DESTROY_DELAY","FIN_SENT","RESET","DESTROY" +}; + +struct OutgoingPacket { + size_t length; + size_t payload; + uint64 time_sent; // microseconds + uint transmissions:31; + bool need_resend:1; + byte data[1]; +}; + +struct SizableCircularBuffer { + // This is the mask. Since it's always a power of 2, adding 1 to this value will return the size. + size_t mask; + // This is the elements that the circular buffer points to + void **elements; + + void *get(size_t i) { assert(elements); return elements ? elements[i & mask] : NULL; } + void put(size_t i, void *data) { assert(elements); elements[i&mask] = data; } + + void grow(size_t item, size_t index); + void ensure_size(size_t item, size_t index) { if (index > mask) grow(item, index); } + size_t size() { return mask + 1; } +}; + +// Item contains the element we want to make space for +// index is the index in the list. +void SizableCircularBuffer::grow(size_t item, size_t index) +{ + // Figure out the new size. + size_t size = mask + 1; + do size *= 2; while (index >= size); + + // Allocate the new buffer + void **buf = (void**)calloc(size, sizeof(void*)); + + size--; + + // Copy elements from the old buffer to the new buffer + for (size_t i = 0; i <= mask; i++) { + buf[(item - index + i) & size] = get(item - index + i); + } + + // Swap to the newly allocated buffer + mask = size; + free(elements); + elements = buf; +} + +// compare if lhs is less than rhs, taking wrapping +// into account. if lhs is close to UINT_MAX and rhs +// is close to 0, lhs is assumed to have wrapped and +// considered smaller +bool wrapping_compare_less(uint32 lhs, uint32 rhs, uint32 mask) +{ + // distance walking from lhs to rhs, downwards + const uint32 dist_down = (lhs - rhs) & mask; + // distance walking from lhs to rhs, upwards + const uint32 dist_up = (rhs - lhs) & mask; + + // if the distance walking up is shorter, lhs + // is less than rhs. If the distance walking down + // is shorter, then rhs is less than lhs + return dist_up < dist_down; +} + +struct DelayHist { + uint32 delay_base; + + // this is the history of delay samples, + // normalized by using the delay_base. These + // values are always greater than 0 and measures + // the queuing delay in microseconds + uint32 cur_delay_hist[CUR_DELAY_SIZE]; + size_t cur_delay_idx; + + // this is the history of delay_base. It's + // a number that doesn't have an absolute meaning + // only relative. It doesn't make sense to initialize + // it to anything other than values relative to + // what's been seen in the real world. 
+ uint32 delay_base_hist[DELAY_BASE_HISTORY]; + size_t delay_base_idx; + // the time when we last stepped the delay_base_idx + uint64 delay_base_time; + + bool delay_base_initialized; + + void clear(uint64 current_ms) + { + delay_base_initialized = false; + delay_base = 0; + cur_delay_idx = 0; + delay_base_idx = 0; + delay_base_time = current_ms; + for (size_t i = 0; i < CUR_DELAY_SIZE; i++) { + cur_delay_hist[i] = 0; + } + for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) { + delay_base_hist[i] = 0; + } + } + + void shift(const uint32 offset) + { + // the offset should never be "negative" + // assert(offset < 0x10000000); + + // increase all of our base delays by this amount + // this is used to take clock skew into account + // by observing the other side's changes in its base_delay + for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) { + delay_base_hist[i] += offset; + } + delay_base += offset; + } + + void add_sample(const uint32 sample, uint64 current_ms) + { + // The two clocks (in the two peers) are assumed not to + // progress at the exact same rate. They are assumed to be + // drifting, which causes the delay samples to contain + // a systematic error, either they are under- + // estimated or over-estimated. This is why we update the + // delay_base every two minutes, to adjust for this. + + // This means the values will keep drifting and eventually wrap. + // We can cross the wrapping boundry in two directions, either + // going up, crossing the highest value, or going down, crossing 0. + + // if the delay_base is close to the max value and sample actually + // wrapped on the other end we would see something like this: + // delay_base = 0xffffff00, sample = 0x00000400 + // sample - delay_base = 0x500 which is the correct difference + + // if the delay_base is instead close to 0, and we got an even lower + // sample (that will eventually update the delay_base), we may see + // something like this: + // delay_base = 0x00000400, sample = 0xffffff00 + // sample - delay_base = 0xfffffb00 + // this needs to be interpreted as a negative number and the actual + // recorded delay should be 0. + + // It is important that all arithmetic that assume wrapping + // is done with unsigned intergers. Signed integers are not guaranteed + // to wrap the way unsigned integers do. At least GCC takes advantage + // of this relaxed rule and won't necessarily wrap signed ints. + + // remove the clock offset and propagation delay. + // delay base is min of the sample and the current + // delay base. This min-operation is subject to wrapping + // and care needs to be taken to correctly choose the + // true minimum. + + // specifically the problem case is when delay_base is very small + // and sample is very large (because it wrapped past zero), sample + // needs to be considered the smaller + + if (!delay_base_initialized) { + // delay_base being 0 suggests that we haven't initialized + // it or its history with any real measurements yet. Initialize + // everything with this sample. + for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) { + // if we don't have a value, set it to the current sample + delay_base_hist[i] = sample; + continue; + } + delay_base = sample; + delay_base_initialized = true; + } + + if (wrapping_compare_less(sample, delay_base_hist[delay_base_idx], TIMESTAMP_MASK)) { + // sample is smaller than the current delay_base_hist entry + // update it + delay_base_hist[delay_base_idx] = sample; + } + + // is sample lower than delay_base? 
If so, update delay_base + if (wrapping_compare_less(sample, delay_base, TIMESTAMP_MASK)) { + // sample is smaller than the current delay_base + // update it + delay_base = sample; + } + + // this operation may wrap, and is supposed to + const uint32 delay = sample - delay_base; + // sanity check. If this is triggered, something fishy is going on + // it means the measured sample was greater than 32 seconds! + //assert(delay < 0x2000000); + + cur_delay_hist[cur_delay_idx] = delay; + cur_delay_idx = (cur_delay_idx + 1) % CUR_DELAY_SIZE; + + // once every minute + if (current_ms - delay_base_time > 60 * 1000) { + delay_base_time = current_ms; + delay_base_idx = (delay_base_idx + 1) % DELAY_BASE_HISTORY; + // clear up the new delay base history spot by initializing + // it to the current sample, then update it + delay_base_hist[delay_base_idx] = sample; + delay_base = delay_base_hist[0]; + // Assign the lowest delay in the last 2 minutes to delay_base + for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) { + if (wrapping_compare_less(delay_base_hist[i], delay_base, TIMESTAMP_MASK)) + delay_base = delay_base_hist[i]; + } + } + } + + uint32 get_value() + { + uint32 value = UINT_MAX; + for (size_t i = 0; i < CUR_DELAY_SIZE; i++) { + value = min(cur_delay_hist[i], value); + } + // value could be UINT_MAX if we have no samples yet... + return value; + } +}; + +struct UTPSocket { + ~UTPSocket(); + + PackedSockAddr addr; + utp_context *ctx; + + int ida; //for ack socket list + + uint16 retransmit_count; + + uint16 reorder_count; + byte duplicate_ack; + + // the number of packets in the send queue. Packets that haven't + // yet been sent count as well as packets marked as needing resend + // the oldest un-acked packet in the send queue is seq_nr - cur_window_packets + uint16 cur_window_packets; + + // how much of the window is used, number of bytes in-flight + // packets that have not yet been sent do not count, packets + // that are marked as needing to be re-sent (due to a timeout) + // don't count either + size_t cur_window; + // maximum window size, in bytes + size_t max_window; + // UTP_SNDBUF setting, in bytes + size_t opt_sndbuf; + // UTP_RCVBUF setting, in bytes + size_t opt_rcvbuf; + + // this is the target delay, in microseconds + // for this socket. defaults to 100000. + size_t target_delay; + + // Is a FIN packet in the reassembly buffer? + bool got_fin:1; + // Timeout procedure + bool fast_timeout:1; + + // max receive window for other end, in bytes + size_t max_window_user; + CONN_STATE state; + // TickCount when we last decayed window (wraps) + int64 last_rwin_decay; + + // the sequence number of the FIN packet. This field is only set + // when we have received a FIN, and the flag field has the FIN flag set. + // it is used to know when it is safe to destroy the socket, we must have + // received all packets up to this sequence number first. + uint16 eof_pkt; + + // All sequence numbers up to including this have been properly received + // by us + uint16 ack_nr; + // This is the sequence number for the next packet to be sent. + uint16 seq_nr; + + uint16 timeout_seq_nr; + + // This is the sequence number of the next packet we're allowed to + // do a fast resend with. This makes sure we only do a fast-resend + // once per packet. We can resend the packet with this sequence number + // or any later packet (with a higher sequence number). 
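+	// Worked example: if fast_resend_seq_nr is 5, a duplicate-ack resend of
+	// packet 4 is refused, while packet 5 or any later one is allowed; after
+	// fast-resending packet 5 this advances to 6, so the same packet is not
+	// fast-resent twice.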
+ uint16 fast_resend_seq_nr; + + uint32 reply_micro; + + uint64 last_got_packet; + uint64 last_sent_packet; + uint64 last_measured_delay; + + // timestamp of the last time the cwnd was full + // this is used to prevent the congestion window + // from growing when we're not sending at capacity + mutable uint64 last_maxed_out_window; + + void *userdata; + + // Round trip time + uint rtt; + // Round trip time variance + uint rtt_var; + // Round trip timeout + uint rto; + DelayHist rtt_hist; + uint retransmit_timeout; + // The RTO timer will timeout here. + uint64 rto_timeout; + // When the window size is set to zero, start this timer. It will send a new packet every 30secs. + uint64 zerowindow_time; + + uint32 conn_seed; + // Connection ID for packets I receive + uint32 conn_id_recv; + // Connection ID for packets I send + uint32 conn_id_send; + // Last rcv window we advertised, in bytes + size_t last_rcv_win; + + DelayHist our_hist; + DelayHist their_hist; + + // extension bytes from SYN packet + byte extensions[8]; + + // MTU Discovery + // time when we should restart the MTU discovery + uint64 mtu_discover_time; + // ceiling and floor of binary search. last is the mtu size + // we're currently using + uint32 mtu_ceiling, mtu_floor, mtu_last; + // we only ever have a single probe in flight at any given time. + // this is the sequence number of that probe, and the size of + // that packet + uint32 mtu_probe_seq, mtu_probe_size; + + // this is the average delay samples, as compared to the initial + // sample. It's averaged over 5 seconds + int32 average_delay; + // this is the sum of all the delay samples + // we've made recently. The important distinction + // of these samples is that they are all made compared + // to the initial sample, this is to deal with + // wrapping in a simple way. + int64 current_delay_sum; + // number of sample ins current_delay_sum + int current_delay_samples; + // initialized to 0, set to the first raw delay sample + // each sample that's added to current_delay_sum + // is subtracted from the value first, to make it + // a delay relative to this sample + uint32 average_delay_base; + // the next time we should add an average delay + // sample into average_delay_hist + uint64 average_sample_time; + // the estimated clock drift between our computer + // and the endpoint computer. The unit is microseconds + // per 5 seconds + int32 clock_drift; + // just used for logging + int32 clock_drift_raw; + + SizableCircularBuffer inbuf, outbuf; + + #ifdef _DEBUG + // Public per-socket statistics, returned by utp_get_stats() + utp_socket_stats _stats; + #endif + + // true if we're in slow-start (exponential growth) phase + bool slow_start; + + // the slow-start threshold, in bytes + size_t ssthresh; + + void log(int level, char const *fmt, ...) + { + va_list va; + char buf[4096], buf2[4096]; + + va_start(va, fmt); + vsnprintf(buf, 4096, fmt, va); + va_end(va); + buf[4095] = '\0'; + + snprintf(buf2, 4096, "%p %s %06d %s", this, addrfmt(addr, addrbuf), conn_id_recv, buf); + buf2[4095] = '\0'; + + ctx->log(level, this, buf2); + } + + void schedule_ack(); + + // called every time mtu_floor or mtu_ceiling are adjusted + void mtu_search_update(); + void mtu_reset(); + + // Calculates the current receive window + size_t get_rcv_window() + { + // Trim window down according to what's already in buffer. + const size_t numbuf = utp_call_get_read_buffer_size(this->ctx, this); + assert((int)numbuf >= 0); + return opt_rcvbuf > numbuf ? 
opt_rcvbuf - numbuf : 0; + } + + // Test if we're ready to decay max_window + // XXX this breaks when spaced by > INT_MAX/2, which is 49 + // days; the failure mode in that case is we do an extra decay + // or fail to do one when we really shouldn't. + bool can_decay_win(int64 msec) const + { + return (msec - last_rwin_decay) >= MAX_WINDOW_DECAY; + } + + // If we can, decay max window, returns true if we actually did so + void maybe_decay_win(uint64 current_ms) + { + if (can_decay_win(current_ms)) { + // TCP uses 0.5 + max_window = (size_t)(max_window * .5); + last_rwin_decay = current_ms; + if (max_window < MIN_WINDOW_SIZE) + max_window = MIN_WINDOW_SIZE; + slow_start = false; + ssthresh = max_window; + } + } + + size_t get_header_size() const + { + return sizeof(PacketFormatV1); + } + + size_t get_udp_mtu() + { + socklen_t len; + SOCKADDR_STORAGE sa = addr.get_sockaddr_storage(&len); + return utp_call_get_udp_mtu(this->ctx, this, (const struct sockaddr *)&sa, len); + } + + size_t get_udp_overhead() + { + socklen_t len; + SOCKADDR_STORAGE sa = addr.get_sockaddr_storage(&len); + return utp_call_get_udp_overhead(this->ctx, this, (const struct sockaddr *)&sa, len); + } + + size_t get_overhead() + { + return get_udp_overhead() + get_header_size(); + } + + void send_data(byte* b, size_t length, bandwidth_type_t type, uint32 flags = 0); + + void send_ack(bool synack = false); + + void send_keep_alive(); + + static void send_rst(utp_context *ctx, + const PackedSockAddr &addr, uint32 conn_id_send, + uint16 ack_nr, uint16 seq_nr); + + void send_packet(OutgoingPacket *pkt); + + bool is_full(int bytes = -1); + bool flush_packets(); + void write_outgoing_packet(size_t payload, uint flags, struct utp_iovec *iovec, size_t num_iovecs); + + #ifdef _DEBUG + void check_invariant(); + #endif + + void check_timeouts(); + int ack_packet(uint16 seq); + size_t selective_ack_bytes(uint base, const byte* mask, byte len, int64& min_rtt); + void selective_ack(uint base, const byte *mask, byte len); + void apply_ccontrol(size_t bytes_acked, uint32 actual_delay, int64 min_rtt); + size_t get_packet_size() const; +}; + +void removeSocketFromAckList(UTPSocket *conn) +{ + if (conn->ida >= 0) + { + UTPSocket *last = conn->ctx->ack_sockets[conn->ctx->ack_sockets.GetCount() - 1]; + + assert(last->ida < (int)(conn->ctx->ack_sockets.GetCount())); + assert(conn->ctx->ack_sockets[last->ida] == last); + last->ida = conn->ida; + conn->ctx->ack_sockets[conn->ida] = last; + conn->ida = -1; + + // Decrease the count + conn->ctx->ack_sockets.SetCount(conn->ctx->ack_sockets.GetCount() - 1); + } +} + +static void utp_register_sent_packet(utp_context *ctx, size_t length) +{ + if (length <= PACKET_SIZE_MID) { + if (length <= PACKET_SIZE_EMPTY) { + ctx->context_stats._nraw_send[PACKET_SIZE_EMPTY_BUCKET]++; + } else if (length <= PACKET_SIZE_SMALL) { + ctx->context_stats._nraw_send[PACKET_SIZE_SMALL_BUCKET]++; + } else + ctx->context_stats._nraw_send[PACKET_SIZE_MID_BUCKET]++; + } else { + if (length <= PACKET_SIZE_BIG) { + ctx->context_stats._nraw_send[PACKET_SIZE_BIG_BUCKET]++; + } else + ctx->context_stats._nraw_send[PACKET_SIZE_HUGE_BUCKET]++; + } +} + +void send_to_addr(utp_context *ctx, const byte *p, size_t len, const PackedSockAddr &addr, int flags = 0) +{ + socklen_t tolen; + SOCKADDR_STORAGE to = addr.get_sockaddr_storage(&tolen); + utp_register_sent_packet(ctx, len); + utp_call_sendto(ctx, NULL, p, len, (const struct sockaddr *)&to, tolen, flags); +} + +void UTPSocket::schedule_ack() +{ + if (ida == -1){ + #if UTP_DEBUG_LOGGING 
+ log(UTP_LOG_DEBUG, "schedule_ack"); + #endif + ida = ctx->ack_sockets.Append(this); + } else { + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "schedule_ack: already in list"); + #endif + } +} + +void UTPSocket::send_data(byte* b, size_t length, bandwidth_type_t type, uint32 flags) +{ + // time stamp this packet with local time, the stamp goes into + // the header of every packet at the 8th byte for 8 bytes : + // two integers, check packet.h for more + uint64 time = utp_call_get_microseconds(ctx, this); + + PacketFormatV1* b1 = (PacketFormatV1*)b; + b1->tv_usec = (uint32)time; + b1->reply_micro = reply_micro; + + last_sent_packet = ctx->current_ms; + + #ifdef _DEBUG + _stats.nbytes_xmit += length; + ++_stats.nxmit; + #endif + + if (ctx->callbacks[UTP_ON_OVERHEAD_STATISTICS]) { + size_t n; + if (type == payload_bandwidth) { + // if this packet carries payload, just + // count the header as overhead + type = header_overhead; + n = get_overhead(); + } else { + n = length + get_udp_overhead(); + } + utp_call_on_overhead_statistics(ctx, this, true, n, type); + } +#if UTP_DEBUG_LOGGING + int flags2 = b1->type(); + uint16 seq_nr = b1->seq_nr; + uint16 ack_nr = b1->ack_nr; + log(UTP_LOG_DEBUG, "send %s len:%u id:%u timestamp:"I64u" reply_micro:%u flags:%s seq_nr:%u ack_nr:%u", + addrfmt(addr, addrbuf), (uint)length, conn_id_send, time, reply_micro, flagnames[flags2], + seq_nr, ack_nr); +#endif + send_to_addr(ctx, b, length, addr, flags); + removeSocketFromAckList(this); +} + +void UTPSocket::send_ack(bool synack) +{ + PacketFormatAckV1 pfa; + zeromem(&pfa); + + size_t len; + last_rcv_win = get_rcv_window(); + pfa.pf.set_version(1); + pfa.pf.set_type(ST_STATE); + pfa.pf.ext = 0; + pfa.pf.connid = conn_id_send; + pfa.pf.ack_nr = ack_nr; + pfa.pf.seq_nr = seq_nr; + pfa.pf.windowsize = (uint32)last_rcv_win; + len = sizeof(PacketFormatV1); + + // we never need to send EACK for connections + // that are shutting down + if (reorder_count != 0 && state < CS_GOT_FIN) { + // if reorder count > 0, send an EACK. + // reorder count should always be 0 + // for synacks, so this should not be + // as synack + assert(!synack); + pfa.pf.ext = 1; + pfa.ext_next = 0; + pfa.ext_len = 4; + uint m = 0; + + // reorder count should only be non-zero + // if the packet ack_nr + 1 has not yet + // been received + assert(inbuf.get(ack_nr + 1) == NULL); + size_t window = min(14+16, inbuf.size()); + // Generate bit mask of segments received. 
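+		// Worked example: with ack_nr == 10, the loop below probes seq_nr
+		// 12, 13, 14, ... (ack_nr + 1 is the packet we are still missing).
+		// If 12 and 14 are buffered but 13 is not, bits 0 and 2 are set,
+		// m == 0x5, and acks[] is serialized least-significant byte first
+		// as 05 00 00 00.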
+ for (size_t i = 0; i < window; i++) { + if (inbuf.get(ack_nr + i + 2) != NULL) { + m |= 1 << i; + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "EACK packet [%u]", ack_nr + i + 2); + #endif + } + } + pfa.acks[0] = (byte)m; + pfa.acks[1] = (byte)(m >> 8); + pfa.acks[2] = (byte)(m >> 16); + pfa.acks[3] = (byte)(m >> 24); + len += 4 + 2; + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "Sending EACK %u [%u] bits:[%032b]", ack_nr, conn_id_send, m); + #endif + } else { + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "Sending ACK %u [%u]", ack_nr, conn_id_send); + #endif + } + + send_data((byte*)&pfa, len, ack_overhead); + removeSocketFromAckList(this); +} + +void UTPSocket::send_keep_alive() +{ + ack_nr--; + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "Sending KeepAlive ACK %u [%u]", ack_nr, conn_id_send); + #endif + + send_ack(); + ack_nr++; +} + +void UTPSocket::send_rst(utp_context *ctx, + const PackedSockAddr &addr, uint32 conn_id_send, uint16 ack_nr, uint16 seq_nr) +{ + PacketFormatV1 pf1; + zeromem(&pf1); + + size_t len; + pf1.set_version(1); + pf1.set_type(ST_RESET); + pf1.ext = 0; + pf1.connid = conn_id_send; + pf1.ack_nr = ack_nr; + pf1.seq_nr = seq_nr; + pf1.windowsize = 0; + len = sizeof(PacketFormatV1); + +// LOG_DEBUG("%s: Sending RST id:%u seq_nr:%u ack_nr:%u", addrfmt(addr, addrbuf), conn_id_send, seq_nr, ack_nr); +// LOG_DEBUG("send %s len:%u id:%u", addrfmt(addr, addrbuf), (uint)len, conn_id_send); + send_to_addr(ctx, (const byte*)&pf1, len, addr); +} + +void UTPSocket::send_packet(OutgoingPacket *pkt) +{ + // only count against the quota the first time we + // send the packet. Don't enforce quota when closing + // a socket. Only enforce the quota when we're sending + // at slow rates (max window < packet size) + + //size_t max_send = min(max_window, opt_sndbuf, max_window_user); + time_t cur_time = utp_call_get_milliseconds(this->ctx, this); + + if (pkt->transmissions == 0 || pkt->need_resend) { + cur_window += pkt->payload; + } + + pkt->need_resend = false; + + PacketFormatV1* p1 = (PacketFormatV1*)pkt->data; + p1->ack_nr = ack_nr; + pkt->time_sent = utp_call_get_microseconds(this->ctx, this); + + //socklen_t salen; + //SOCKADDR_STORAGE sa = addr.get_sockaddr_storage(&salen); + bool use_as_mtu_probe = false; + + // TODO: this is subject to nasty wrapping issues! Below as well + if (mtu_discover_time < (uint64)cur_time) { + // it's time to reset our MTU assupmtions + // and trigger a new search + mtu_reset(); + } + + // don't use packets that are larger then mtu_ceiling + // as probes, since they were probably used as probes + // already and failed, now we need it to fragment + // just to get it through + // if seq_nr == 1, the probe would end up being 0 + // which is a magic number representing no-probe + // that why we don't send a probe for a packet with + // sequence number 0 + if (mtu_floor < mtu_ceiling + && pkt->length > mtu_floor + && pkt->length <= mtu_ceiling + && mtu_probe_seq == 0 + && seq_nr != 1 + && pkt->transmissions == 0) { + + // we've already incremented seq_nr + // for this packet + mtu_probe_seq = (seq_nr - 1) & ACK_NR_MASK; + mtu_probe_size = pkt->length; + assert(pkt->length >= mtu_floor); + assert(pkt->length <= mtu_ceiling); + use_as_mtu_probe = true; + log(UTP_LOG_MTU, "MTU [PROBE] floor:%d ceiling:%d current:%d" + , mtu_floor, mtu_ceiling, mtu_probe_size); + } + + pkt->transmissions++; + send_data((byte*)pkt->data, pkt->length, + (state == CS_SYN_SENT) ? connect_overhead + : (pkt->transmissions == 1) ? 
payload_bandwidth + : retransmit_overhead, use_as_mtu_probe ? UTP_UDP_DONTFRAG : 0); +} + +bool UTPSocket::is_full(int bytes) +{ + size_t packet_size = get_packet_size(); + if (bytes < 0) bytes = packet_size; + else if (bytes > (int)packet_size) bytes = (int)packet_size; + size_t max_send = min(max_window, opt_sndbuf, max_window_user); + + // subtract one to save space for the FIN packet + if (cur_window_packets >= OUTGOING_BUFFER_MAX_SIZE - 1) { + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "is_full:false cur_window_packets:%d MAX:%d", cur_window_packets, OUTGOING_BUFFER_MAX_SIZE - 1); + #endif + + last_maxed_out_window = ctx->current_ms; + return true; + } + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "is_full:%s. cur_window:%u pkt:%u max:%u cur_window_packets:%u max_window:%u" + , (cur_window + bytes > max_send) ? "true" : "false" + , cur_window, bytes, max_send, cur_window_packets + , max_window); + #endif + + if (cur_window + bytes > max_send) { + last_maxed_out_window = ctx->current_ms; + return true; + } + return false; +} + +bool UTPSocket::flush_packets() +{ + size_t packet_size = get_packet_size(); + + // send packets that are waiting on the pacer to be sent + // i has to be an unsigned 16 bit counter to wrap correctly + // signed types are not guaranteed to wrap the way you expect + for (uint16 i = seq_nr - cur_window_packets; i != seq_nr; ++i) { + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(i); + if (pkt == 0 || (pkt->transmissions > 0 && pkt->need_resend == false)) continue; + // have we run out of quota? + if (is_full()) return true; + + // Nagle check + // don't send the last packet if we have one packet in-flight + // and the current packet is still smaller than packet_size. + if (i != ((seq_nr - 1) & ACK_NR_MASK) || + cur_window_packets == 1 || + pkt->payload >= packet_size) { + send_packet(pkt); + } + } + return false; +} + +// @payload: number of bytes to send +// @flags: either ST_DATA, or ST_FIN +// @iovec: base address of iovec array +// @num_iovecs: number of iovecs in array +void UTPSocket::write_outgoing_packet(size_t payload, uint flags, struct utp_iovec *iovec, size_t num_iovecs) +{ + // Setup initial timeout timer + if (cur_window_packets == 0) { + retransmit_timeout = rto; + rto_timeout = ctx->current_ms + retransmit_timeout; + assert(cur_window == 0); + } + + size_t packet_size = get_packet_size(); + do { + assert(cur_window_packets < OUTGOING_BUFFER_MAX_SIZE); + assert(flags == ST_DATA || flags == ST_FIN); + + size_t added = 0; + + OutgoingPacket *pkt = NULL; + + if (cur_window_packets > 0) { + pkt = (OutgoingPacket*)outbuf.get(seq_nr - 1); + } + + const size_t header_size = get_header_size(); + bool append = true; + + // if there's any room left in the last packet in the window + // and it hasn't been sent yet, fill that frame first + if (payload && pkt && !pkt->transmissions && pkt->payload < packet_size) { + // Use the previous unsent packet + added = min(payload + pkt->payload, max(packet_size, pkt->payload)) - pkt->payload; + pkt = (OutgoingPacket*)realloc(pkt, + (sizeof(OutgoingPacket) - 1) + + header_size + + pkt->payload + added); + outbuf.put(seq_nr - 1, pkt); + append = false; + assert(!pkt->need_resend); + } else { + // Create the packet to send. + added = payload; + pkt = (OutgoingPacket*)malloc((sizeof(OutgoingPacket) - 1) + + header_size + + added); + pkt->payload = 0; + pkt->transmissions = 0; + pkt->need_resend = false; + } + + if (added) { + assert(flags == ST_DATA); + + // Fill it with data from the upper layer. 
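+				// Worked example: with added == 1000 and two iovecs holding
+				// 300 and 900 bytes, the loop below copies all 300 bytes of
+				// the first entry and 700 bytes of the second, advancing
+				// iov_base and shrinking iov_len as it goes, so a later call
+				// resumes at the 200 leftover bytes.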
+ unsigned char *p = pkt->data + header_size + pkt->payload; + size_t needed = added; + + /* + while (needed) { + *p = *(char*)iovec[0].iov_base; + p++; + iovec[0].iov_base = (char *)iovec[0].iov_base + 1; + needed--; + } + */ + + for (size_t i = 0; i < num_iovecs && needed; i++) { + if (iovec[i].iov_len == 0) + continue; + + size_t num = min(needed, iovec[i].iov_len); + memcpy(p, iovec[i].iov_base, num); + + p += num; + + iovec[i].iov_len -= num; + iovec[i].iov_base = (byte*)iovec[i].iov_base + num; // iovec[i].iov_base += num, but without void* pointers + needed -= num; + } + + assert(needed == 0); + } + pkt->payload += added; + pkt->length = header_size + pkt->payload; + + last_rcv_win = get_rcv_window(); + + PacketFormatV1* p1 = (PacketFormatV1*)pkt->data; + p1->set_version(1); + p1->set_type(flags); + p1->ext = 0; + p1->connid = conn_id_send; + p1->windowsize = (uint32)last_rcv_win; + p1->ack_nr = ack_nr; + + if (append) { + // Remember the message in the outgoing queue. + outbuf.ensure_size(seq_nr, cur_window_packets); + outbuf.put(seq_nr, pkt); + p1->seq_nr = seq_nr; + seq_nr++; + cur_window_packets++; + } + + payload -= added; + + } while (payload); + + flush_packets(); +} + +#ifdef _DEBUG +void UTPSocket::check_invariant() +{ + if (reorder_count > 0) { + assert(inbuf.get(ack_nr + 1) == NULL); + } + + size_t outstanding_bytes = 0; + for (int i = 0; i < cur_window_packets; ++i) { + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(seq_nr - i - 1); + if (pkt == 0 || pkt->transmissions == 0 || pkt->need_resend) continue; + outstanding_bytes += pkt->payload; + } + assert(outstanding_bytes == cur_window); +} +#endif + +void UTPSocket::check_timeouts() +{ + #ifdef _DEBUG + check_invariant(); + #endif + + // this invariant should always be true + assert(cur_window_packets == 0 || outbuf.get(seq_nr - cur_window_packets)); + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "CheckTimeouts timeout:%d max_window:%u cur_window:%u " + "state:%s cur_window_packets:%u", + (int)(rto_timeout - ctx->current_ms), (uint)max_window, (uint)cur_window, + statenames[state], cur_window_packets); + #endif + + if (state != CS_DESTROY) flush_packets(); + + switch (state) { + case CS_SYN_SENT: + case CS_SYN_RECV: + case CS_CONNECTED_FULL: + case CS_CONNECTED: + case CS_FIN_SENT: { + + // Reset max window... + if ((int)(ctx->current_ms - zerowindow_time) >= 0 && max_window_user == 0) { + max_window_user = PACKET_SIZE; + } + + if ((int)(ctx->current_ms - rto_timeout) >= 0 + && rto_timeout > 0) { + + bool ignore_loss = false; + + if (cur_window_packets == 1 + && ((seq_nr - 1) & ACK_NR_MASK) == mtu_probe_seq + && mtu_probe_seq != 0) { + // we only had a single outstanding packet that timed out, and it was the probe + mtu_ceiling = mtu_probe_size - 1; + mtu_search_update(); + // this packet was most likely dropped because the packet size being + // too big and not because congestion. 
To accelerate the binary search for + // the MTU, resend immediately and don't reset the window size + ignore_loss = true; + log(UTP_LOG_MTU, "MTU [PROBE-TIMEOUT] floor:%d ceiling:%d current:%d" + , mtu_floor, mtu_ceiling, mtu_last); + } + // we dropepd the probe, clear these fields to + // allow us to send a new one + mtu_probe_seq = mtu_probe_size = 0; + log(UTP_LOG_MTU, "MTU [TIMEOUT]"); + + /* + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(seq_nr - cur_window_packets); + + // If there were a lot of retransmissions, force recomputation of round trip time + if (pkt->transmissions >= 4) + rtt = 0; + */ + + // Increase RTO + const uint new_timeout = ignore_loss ? retransmit_timeout : retransmit_timeout * 2; + + // They initiated the connection but failed to respond before the rto. + // A malicious client can also spoof the destination address of a ST_SYN bringing us to this state. + // Kill the connection and do not notify the upper layer + if (state == CS_SYN_RECV) { + state = CS_DESTROY; + utp_call_on_error(ctx, this, UTP_ETIMEDOUT); + return; + } + + // We initiated the connection but the other side failed to respond before the rto + if (retransmit_count >= 4 || (state == CS_SYN_SENT && retransmit_count >= 2)) { + // 4 consecutive transmissions have timed out. Kill it. If we + // haven't even connected yet, give up after only 2 consecutive + // failed transmissions. + if (state == CS_FIN_SENT) + state = CS_DESTROY; + else + state = CS_RESET; + utp_call_on_error(ctx, this, UTP_ETIMEDOUT); + return; + } + + retransmit_timeout = new_timeout; + rto_timeout = ctx->current_ms + new_timeout; + + if (!ignore_loss) { + // On Timeout + duplicate_ack = 0; + + int packet_size = get_packet_size(); + + if ((cur_window_packets == 0) && ((int)max_window > packet_size)) { + // we don't have any packets in-flight, even though + // we could. This implies that the connection is just + // idling. No need to be aggressive about resetting the + // congestion window. Just let it decay by a 3:rd. + // don't set it any lower than the packet size though + max_window = max(max_window * 2 / 3, size_t(packet_size)); + } else { + // our delay was so high that our congestion window + // was shrunk below one packet, preventing us from + // sending anything for one time-out period. Now, reset + // the congestion window to fit one packet, to start over + // again + max_window = packet_size; + slow_start = true; + } + } + + // every packet should be considered lost + for (int i = 0; i < cur_window_packets; ++i) { + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(seq_nr - i - 1); + if (pkt == 0 || pkt->transmissions == 0 || pkt->need_resend) continue; + pkt->need_resend = true; + assert(cur_window >= pkt->payload); + cur_window -= pkt->payload; + } + + if (cur_window_packets > 0) { + retransmit_count++; + // used in parse_log.py + log(UTP_LOG_NORMAL, "Packet timeout. Resend. seq_nr:%u. timeout:%u " + "max_window:%u cur_window_packets:%d" + , seq_nr - cur_window_packets, retransmit_timeout + , (uint)max_window, int(cur_window_packets)); + + fast_timeout = true; + timeout_seq_nr = seq_nr; + + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(seq_nr - cur_window_packets); + assert(pkt); + + // Re-send the packet. + send_packet(pkt); + } + } + + // Mark the socket as writable. 
If the cwnd has grown, or if the number of + // bytes in-flight is lower than cwnd, we need to make the socket writable again + // in case it isn't + if (state == CS_CONNECTED_FULL && !is_full()) { + state = CS_CONNECTED; + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "Socket writable. max_window:%u cur_window:%u packet_size:%u", + (uint)max_window, (uint)cur_window, (uint)get_packet_size()); + #endif + utp_call_on_state_change(this->ctx, this, UTP_STATE_WRITABLE); + } + + if (state >= CS_CONNECTED && state < CS_GOT_FIN) { + if ((int)(ctx->current_ms - last_sent_packet) >= KEEPALIVE_INTERVAL) { + send_keep_alive(); + } + } + break; + } + + // Close? + case CS_GOT_FIN: + case CS_DESTROY_DELAY: + if ((int)(ctx->current_ms - rto_timeout) >= 0) { + state = (state == CS_DESTROY_DELAY) ? CS_DESTROY : CS_RESET; + if (cur_window_packets > 0) { + utp_call_on_error(ctx, this, UTP_ECONNRESET); + } + } + break; + // prevent warning + case CS_UNINITIALIZED: + case CS_IDLE: + case CS_RESET: + case CS_DESTROY: + break; + } +} + +// this should be called every time we change mtu_floor or mtu_ceiling +void UTPSocket::mtu_search_update() +{ + assert(mtu_floor <= mtu_ceiling); + + // binary search + mtu_last = (mtu_floor + mtu_ceiling) / 2; + + // enable a new probe to be sent + mtu_probe_seq = mtu_probe_size = 0; + + // if the floor and ceiling are close enough, consider the + // MTU binary search complete. We set the current value + // to floor since that's the only size we know can go through + // also set the ceiling to floor to terminate the searching + if (mtu_ceiling - mtu_floor <= 16) { + mtu_last = mtu_floor; + log(UTP_LOG_MTU, "MTU [DONE] floor:%d ceiling:%d current:%d" + , mtu_floor, mtu_ceiling, mtu_last); + mtu_ceiling = mtu_floor; + assert(mtu_floor <= mtu_ceiling); + // Do another search in 30 minutes + mtu_discover_time = utp_call_get_milliseconds(this->ctx, this) + 30 * 60 * 1000; + } +} + +void UTPSocket::mtu_reset() +{ + mtu_ceiling = get_udp_mtu(); + // Less would not pass TCP... + mtu_floor = 576; + log(UTP_LOG_MTU, "MTU [RESET] floor:%d ceiling:%d current:%d" + , mtu_floor, mtu_ceiling, mtu_last); + assert(mtu_floor <= mtu_ceiling); + mtu_discover_time = utp_call_get_milliseconds(this->ctx, this) + 30 * 60 * 1000; +} + +// returns: +// 0: the packet was acked. +// 1: it means that the packet had already been acked +// 2: the packet has not been sent yet +int UTPSocket::ack_packet(uint16 seq) +{ + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(seq); + + // the packet has already been acked (or not sent) + if (pkt == NULL) { + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "got ack for:%u (already acked, or never sent)", seq); + #endif + + return 1; + } + + // can't ack packets that haven't been sent yet! + if (pkt->transmissions == 0) { + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "got ack for:%u (never sent, pkt_size:%u need_resend:%u)", + seq, (uint)pkt->payload, pkt->need_resend); + #endif + + return 2; + } + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "got ack for:%u (pkt_size:%u need_resend:%u)", + seq, (uint)pkt->payload, pkt->need_resend); + #endif + + outbuf.put(seq, NULL); + + // if we never re-sent the packet, update the RTT estimate + if (pkt->transmissions == 1) { + // Estimate the round trip time. + const uint32 ertt = (uint32)((utp_call_get_microseconds(this->ctx, this) - pkt->time_sent) / 1000); + if (rtt == 0) { + // First round trip time sample + rtt = ertt; + rtt_var = ertt / 2; + // sanity check. 
rtt should never be more than 6 seconds +// assert(rtt < 6000); + } else { + // Compute new round trip times + const int delta = (int)rtt - ertt; + rtt_var = rtt_var + (int)(abs(delta) - rtt_var) / 4; + rtt = rtt - rtt/8 + ertt/8; + // sanity check. rtt should never be more than 6 seconds +// assert(rtt < 6000); + rtt_hist.add_sample(ertt, ctx->current_ms); + } + rto = max(rtt + rtt_var * 4, 1000); + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "rtt:%u avg:%u var:%u rto:%u", + ertt, rtt, rtt_var, rto); + #endif + + } + retransmit_timeout = rto; + rto_timeout = ctx->current_ms + rto; + // if need_resend is set, this packet has already + // been considered timed-out, and is not included in + // the cur_window anymore + if (!pkt->need_resend) { + assert(cur_window >= pkt->payload); + cur_window -= pkt->payload; + } + free(pkt); + retransmit_count = 0; + return 0; +} + +// count the number of bytes that were acked by the EACK header +size_t UTPSocket::selective_ack_bytes(uint base, const byte* mask, byte len, int64& min_rtt) +{ + if (cur_window_packets == 0) return 0; + + size_t acked_bytes = 0; + int bits = len * 8; + uint64 now = utp_call_get_microseconds(this->ctx, this); + + do { + uint v = base + bits; + + // ignore bits that haven't been sent yet + // see comment in UTPSocket::selective_ack + if (((seq_nr - v - 1) & ACK_NR_MASK) >= (uint16)(cur_window_packets - 1)) + continue; + + // ignore bits that represents packets we haven't sent yet + // or packets that have already been acked + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(v); + if (!pkt || pkt->transmissions == 0) + continue; + + // Count the number of segments that were successfully received past it. + if (bits >= 0 && mask[bits>>3] & (1 << (bits & 7))) { + assert((int)(pkt->payload) >= 0); + acked_bytes += pkt->payload; + if (pkt->time_sent < now) + min_rtt = min(min_rtt, now - pkt->time_sent); + else + min_rtt = min(min_rtt, 50000); + continue; + } + } while (--bits >= -1); + return acked_bytes; +} + +enum { MAX_EACK = 128 }; + +void UTPSocket::selective_ack(uint base, const byte *mask, byte len) +{ + if (cur_window_packets == 0) return; + + // the range is inclusive [0, 31] bits + int bits = len * 8 - 1; + + int count = 0; + + // resends is a stack of sequence numbers we need to resend. Since we + // iterate in reverse over the acked packets, at the end, the top packets + // are the ones we want to resend + int resends[MAX_EACK]; + int nr = 0; + +#if UTP_DEBUG_LOGGING + char bitmask[1024] = {0}; + int counter = bits; + for (int i = 0; i <= bits; ++i) { + bool bit_set = counter >= 0 && mask[counter>>3] & (1 << (counter & 7)); + bitmask[i] = bit_set ? '1' : '0'; + --counter; + } + + log(UTP_LOG_DEBUG, "Got EACK [%s] base:%u", bitmask, base); +#endif + + do { + // we're iterating over the bits from higher sequence numbers + // to lower (kind of in reverse order, wich might not be very + // intuitive) + uint v = base + bits; + + // ignore bits that haven't been sent yet + // and bits that fall below the ACKed sequence number + // this can happen if an EACK message gets + // reordered and arrives after a packet that ACKs up past + // the base for thie EACK message + + // this is essentially the same as: + // if v >= seq_nr || v <= seq_nr - cur_window_packets + // but it takes wrapping into account + + // if v == seq_nr the -1 will make it wrap. if v > seq_nr + // it will also wrap (since it will fall further below 0) + // and be > cur_window_packets. 
+ // if v == seq_nr - cur_window_packets, the result will be + // seq_nr - (seq_nr - cur_window_packets) - 1 + // == seq_nr - seq_nr + cur_window_packets - 1 + // == cur_window_packets - 1 which will be caught by the + // test. If v < seq_nr - cur_window_packets the result will grow + // fall furhter outside of the cur_window_packets range. + + // sequence number space: + // + // rejected < accepted > rejected + // <============+--------------+============> + // ^ ^ + // | | + // (seq_nr-wnd) seq_nr + + if (((seq_nr - v - 1) & ACK_NR_MASK) >= (uint16)(cur_window_packets - 1)) + continue; + + // this counts as a duplicate ack, even though we might have + // received an ack for this packet previously (in another EACK + // message for instance) + bool bit_set = bits >= 0 && mask[bits>>3] & (1 << (bits & 7)); + + // if this packet is acked, it counts towards the duplicate ack counter + if (bit_set) count++; + + // ignore bits that represents packets we haven't sent yet + // or packets that have already been acked + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(v); + if (!pkt || pkt->transmissions == 0) { + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "skipping %u. pkt:%08x transmissions:%u %s", + v, pkt, pkt?pkt->transmissions:0, pkt?"(not sent yet?)":"(already acked?)"); + #endif + continue; + } + + // Count the number of segments that were successfully received past it. + if (bit_set) { + // the selective ack should never ACK the packet we're waiting for to decrement cur_window_packets + assert((v & outbuf.mask) != ((seq_nr - cur_window_packets) & outbuf.mask)); + ack_packet(v); + continue; + } + + // Resend segments + // if count is less than our re-send limit, we haven't seen enough + // acked packets in front of this one to warrant a re-send. + // if count == 0, we're still going through the tail of zeroes + if (((v - fast_resend_seq_nr) & ACK_NR_MASK) <= OUTGOING_BUFFER_MAX_SIZE && + count >= DUPLICATE_ACKS_BEFORE_RESEND) { + // resends is a stack, and we're mostly interested in the top of it + // if we're full, just throw away the lower half + if (nr >= MAX_EACK - 2) { + memmove(resends, &resends[MAX_EACK/2], MAX_EACK/2 * sizeof(resends[0])); + nr -= MAX_EACK / 2; + } + resends[nr++] = v; + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "no ack for %u", v); + #endif + + } else { + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "not resending %u count:%d dup_ack:%u fast_resend_seq_nr:%u", + v, count, duplicate_ack, fast_resend_seq_nr); + #endif + } + } while (--bits >= -1); + + if (((base - 1 - fast_resend_seq_nr) & ACK_NR_MASK) <= OUTGOING_BUFFER_MAX_SIZE && + count >= DUPLICATE_ACKS_BEFORE_RESEND) { + // if we get enough duplicate acks to start + // resending, the first packet we should resend + // is base-1 + resends[nr++] = (base - 1) & ACK_NR_MASK; + + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "no ack for %u", (base - 1) & ACK_NR_MASK); + #endif + + } else { + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "not resending %u count:%d dup_ack:%u fast_resend_seq_nr:%u", + base - 1, count, duplicate_ack, fast_resend_seq_nr); + #endif + } + + bool back_off = false; + int i = 0; + while (nr > 0) { + uint v = resends[--nr]; + // don't consider the tail of 0:es to be lost packets + // only unacked packets with acked packets after should + // be considered lost + OutgoingPacket *pkt = (OutgoingPacket*)outbuf.get(v); + + // this may be an old (re-ordered) packet, and some of the + // packets in here may have been acked already. 
In which + // case they will not be in the send queue anymore + if (!pkt) continue; + + // used in parse_log.py + log(UTP_LOG_NORMAL, "Packet %u lost. Resending", v); + + // On Loss + back_off = true; + + #ifdef _DEBUG + ++_stats.rexmit; + #endif + + send_packet(pkt); + fast_resend_seq_nr = (v + 1) & ACK_NR_MASK; + + // Re-send max 4 packets. + if (++i >= 4) break; + } + + if (back_off) + maybe_decay_win(ctx->current_ms); + + duplicate_ack = count; +} + +void UTPSocket::apply_ccontrol(size_t bytes_acked, uint32 actual_delay, int64 min_rtt) +{ + // the delay can never be greater than the rtt. The min_rtt + // variable is the RTT in microseconds + + assert(min_rtt >= 0); + int32 our_delay = min(our_hist.get_value(), uint32(min_rtt)); + assert(our_delay != INT_MAX); + assert(our_delay >= 0); + + utp_call_on_delay_sample(this->ctx, this, our_delay / 1000); + + // This test the connection under heavy load from foreground + // traffic. Pretend that our delays are very high to force the + // connection to use sub-packet size window sizes + //our_delay *= 4; + + // target is microseconds + int target = target_delay; + if (target <= 0) target = 100000; + + // this is here to compensate for very large clock drift that affects + // the congestion controller into giving certain endpoints an unfair + // share of the bandwidth. We have an estimate of the clock drift + // (clock_drift). The unit of this is microseconds per 5 seconds. + // empirically, a reasonable cut-off appears to be about 200000 + // (which is pretty high). The main purpose is to compensate for + // people trying to "cheat" uTP by making their clock run slower, + // and this definitely catches that without any risk of false positives + // if clock_drift < -200000 start applying a penalty delay proportional + // to how far beoynd -200000 the clock drift is + int32 penalty = 0; + if (clock_drift < -200000) { + penalty = (-clock_drift - 200000) / 7; + our_delay += penalty; + } + + double off_target = target - our_delay; + + // this is the same as: + // + // (min(off_target, target) / target) * (bytes_acked / max_window) * MAX_CWND_INCREASE_BYTES_PER_RTT + // + // so, it's scaling the max increase by the fraction of the window this ack represents, and the fraction + // of the target delay the current delay represents. + // The min() around off_target protects against crazy values of our_delay, which may happen when th + // timestamps wraps, or by just having a malicious peer sending garbage. This caps the increase + // of the window size to MAX_CWND_INCREASE_BYTES_PER_RTT per rtt. + // as for large negative numbers, this direction is already capped at the min packet size further down + // the min around the bytes_acked protects against the case where the window size was recently + // shrunk and the number of acked bytes exceeds that. This is considered no more than one full + // window, in order to keep the gain within sane boundries. + + assert(bytes_acked > 0); + double window_factor = (double)min(bytes_acked, max_window) / (double)max(max_window, bytes_acked); + + double delay_factor = off_target / target; + double scaled_gain = MAX_CWND_INCREASE_BYTES_PER_RTT * window_factor * delay_factor; + + // since MAX_CWND_INCREASE_BYTES_PER_RTT is a cap on how much the window size (max_window) + // may increase per RTT, we may not increase the window size more than that proportional + // to the number of bytes that were acked, so that once one window has been acked (one rtt) + // the increase limit is not exceeded + // the +1. 
is to allow for floating point imprecision + assert(scaled_gain <= 1. + MAX_CWND_INCREASE_BYTES_PER_RTT * (double)min(bytes_acked, max_window) / (double)max(max_window, bytes_acked)); + + if (scaled_gain > 0 && ctx->current_ms - last_maxed_out_window > 1000) { + // if it was more than 1 second since we tried to send a packet + // and stopped because we hit the max window, we're most likely rate + // limited (which prevents us from ever hitting the window size) + // if this is the case, we cannot let the max_window grow indefinitely + scaled_gain = 0; + } + + size_t ledbat_cwnd = (max_window + scaled_gain < MIN_WINDOW_SIZE) ? MIN_WINDOW_SIZE : (size_t)(max_window + scaled_gain); + + if (slow_start) { + size_t ss_cwnd = (size_t)(max_window + window_factor*get_packet_size()); + if (ss_cwnd > ssthresh) { + slow_start = false; + } else if (our_delay > target*0.9) { + // even if we're a little under the target delay, we conservatively + // discontinue the slow start phase + slow_start = false; + ssthresh = max_window; + } else { + max_window = max(ss_cwnd, ledbat_cwnd); + } + } else { + max_window = ledbat_cwnd; + } + + + // make sure that the congestion window is below max + // make sure that we don't shrink our window too small + max_window = clamp(max_window, MIN_WINDOW_SIZE, opt_sndbuf); + + // used in parse_log.py + log(UTP_LOG_NORMAL, "actual_delay:%u our_delay:%d their_delay:%u off_target:%d max_window:%u " + "delay_base:%u delay_sum:%d target_delay:%d acked_bytes:%u cur_window:%u " + "scaled_gain:%f rtt:%u rate:%u wnduser:%u rto:%u timeout:%d get_microseconds:"I64u" " + "cur_window_packets:%u packet_size:%u their_delay_base:%u their_actual_delay:%u " + "average_delay:%d clock_drift:%d clock_drift_raw:%d delay_penalty:%d current_delay_sum:"I64u + "current_delay_samples:%d average_delay_base:%d last_maxed_out_window:"I64u" opt_sndbuf:%d " + "current_ms:"I64u"", + actual_delay, our_delay / 1000, their_hist.get_value() / 1000, + int(off_target / 1000), uint(max_window), uint32(our_hist.delay_base), + int((our_delay + their_hist.get_value()) / 1000), int(target / 1000), uint(bytes_acked), + (uint)(cur_window - bytes_acked), (float)(scaled_gain), rtt, + (uint)(max_window * 1000 / (rtt_hist.delay_base?rtt_hist.delay_base:50)), + (uint)max_window_user, rto, (int)(rto_timeout - ctx->current_ms), + utp_call_get_microseconds(this->ctx, this), cur_window_packets, (uint)get_packet_size(), + their_hist.delay_base, their_hist.delay_base + their_hist.get_value(), + average_delay, clock_drift, clock_drift_raw, penalty / 1000, + current_delay_sum, current_delay_samples, average_delay_base, + uint64(last_maxed_out_window), int(opt_sndbuf), uint64(ctx->current_ms)); +} + +static void utp_register_recv_packet(UTPSocket *conn, size_t len) +{ + #ifdef _DEBUG + ++conn->_stats.nrecv; + conn->_stats.nbytes_recv += len; + #endif + + if (len <= PACKET_SIZE_MID) { + if (len <= PACKET_SIZE_EMPTY) { + conn->ctx->context_stats._nraw_recv[PACKET_SIZE_EMPTY_BUCKET]++; + } else if (len <= PACKET_SIZE_SMALL) { + conn->ctx->context_stats._nraw_recv[PACKET_SIZE_SMALL_BUCKET]++; + } else + conn->ctx->context_stats._nraw_recv[PACKET_SIZE_MID_BUCKET]++; + } else { + if (len <= PACKET_SIZE_BIG) { + conn->ctx->context_stats._nraw_recv[PACKET_SIZE_BIG_BUCKET]++; + } else + conn->ctx->context_stats._nraw_recv[PACKET_SIZE_HUGE_BUCKET]++; + } +} + +// returns the max number of bytes of payload the uTP +// connection is allowed to send +size_t UTPSocket::get_packet_size() const +{ + int header_size = sizeof(PacketFormatV1); + size_t 
mtu = mtu_last ? mtu_last : mtu_ceiling; + return mtu - header_size; +} + +// Process an incoming packet +// syn is true if this is the first packet received. It will cut off parsing +// as soon as the header is done +size_t utp_process_incoming(UTPSocket *conn, const byte *packet, size_t len, bool syn = false) +{ + utp_register_recv_packet(conn, len); + + conn->ctx->current_ms = utp_call_get_milliseconds(conn->ctx, conn); + + const PacketFormatV1 *pf1 = (PacketFormatV1*)packet; + const byte *packet_end = packet + len; + + uint16 pk_seq_nr = pf1->seq_nr; + uint16 pk_ack_nr = pf1->ack_nr; + uint8 pk_flags = pf1->type(); + + if (pk_flags >= ST_NUM_STATES) return 0; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Got %s. seq_nr:%u ack_nr:%u state:%s timestamp:"I64u" reply_micro:%u" + , flagnames[pk_flags], pk_seq_nr, pk_ack_nr, statenames[conn->state] + , uint64(pf1->tv_usec), (uint32)(pf1->reply_micro)); + #endif + + // mark receipt time + uint64 time = utp_call_get_microseconds(conn->ctx, conn); + + // window packets size is used to calculate a minimum + // permissible range for received acks. connections with acks falling + // out of this range are dropped + const uint16 curr_window = max(conn->cur_window_packets + ACK_NR_ALLOWED_WINDOW, ACK_NR_ALLOWED_WINDOW); + + // ignore packets whose ack_nr is invalid. This would imply a spoofed address + // or a malicious attempt to attach the uTP implementation. + // acking a packet that hasn't been sent yet! + // SYN packets have an exception, since there are no previous packets + if ((pk_flags != ST_SYN || conn->state != CS_SYN_RECV) && + (wrapping_compare_less(conn->seq_nr - 1, pk_ack_nr, ACK_NR_MASK) + || wrapping_compare_less(pk_ack_nr, conn->seq_nr - 1 - curr_window, ACK_NR_MASK))) { +#if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Invalid ack_nr: %u. our seq_nr: %u last unacked: %u" + , pk_ack_nr, conn->seq_nr, (conn->seq_nr - conn->cur_window_packets) & ACK_NR_MASK); +#endif + return 0; + } + + // RSTs are handled earlier, since the connid matches the send id not the recv id + assert(pk_flags != ST_RESET); + + // TODO: maybe send a ST_RESET if we're in CS_RESET? + + const byte *selack_ptr = NULL; + + // Unpack UTP packet options + // Data pointer + const byte *data = (const byte*)pf1 + conn->get_header_size(); + if (conn->get_header_size() > len) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Invalid packet size (less than header size)"); + #endif + + return 0; + } + // Skip the extension headers + uint extension = pf1->ext; + if (extension != 0) { + do { + // Verify that the packet is valid. 
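+			// Note: each extension begins with a two-byte header -- the type
+			// of the next extension followed by the length of this
+			// extension's payload. After the "data += 2" below, data[-2] is
+			// that next-extension type and data[-1] is the payload length
+			// used for the bounds check and the final "data += data[-1]".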
+ data += 2; + + if ((int)(packet_end - data) < 0 || (int)(packet_end - data) < data[-1]) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Invalid len of extensions"); + #endif + + return 0; + } + + switch(extension) { + case 1: // Selective Acknowledgment + selack_ptr = data; + break; + case 2: // extension bits + if (data[-1] != 8) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Invalid len of extension bits header"); + #endif + + return 0; + } + memcpy(conn->extensions, data, 8); + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "got extension bits:%02x%02x%02x%02x%02x%02x%02x%02x", + conn->extensions[0], conn->extensions[1], conn->extensions[2], conn->extensions[3], + conn->extensions[4], conn->extensions[5], conn->extensions[6], conn->extensions[7]); + #endif + } + extension = data[-2]; + data += data[-1]; + } while (extension); + } + + if (conn->state == CS_SYN_SENT) { + // if this is a syn-ack, initialize our ack_nr + // to match the sequence number we got from + // the other end + conn->ack_nr = (pk_seq_nr - 1) & SEQ_NR_MASK; + } + + conn->last_got_packet = conn->ctx->current_ms; + + if (syn) { + return 0; + } + + // seqnr is the number of packets past the expected + // packet this is. ack_nr is the last acked, seq_nr is the + // current. Subtracring 1 makes 0 mean "this is the next + // expected packet". + const uint seqnr = (pk_seq_nr - conn->ack_nr - 1) & SEQ_NR_MASK; + + // Getting an invalid sequence number? + if (seqnr >= REORDER_BUFFER_MAX_SIZE) { + if (seqnr >= (SEQ_NR_MASK + 1) - REORDER_BUFFER_MAX_SIZE && pk_flags != ST_STATE) { + conn->schedule_ack(); + } + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, " Got old Packet/Ack (%u/%u)=%u" + , pk_seq_nr, conn->ack_nr, seqnr); + #endif + return 0; + } + + // Process acknowledgment + // acks is the number of packets that was acked + int acks = (pk_ack_nr - (conn->seq_nr - 1 - conn->cur_window_packets)) & ACK_NR_MASK; + + // this happens when we receive an old ack nr + if (acks > conn->cur_window_packets) acks = 0; + + // if we get the same ack_nr as in the last packet + // increase the duplicate_ack counter, otherwise reset + // it to 0 + if (conn->cur_window_packets > 0) { + if (pk_ack_nr == ((conn->seq_nr - conn->cur_window_packets - 1) & ACK_NR_MASK) + && conn->cur_window_packets > 0) { + ++conn->duplicate_ack; + if (conn->duplicate_ack == DUPLICATE_ACKS_BEFORE_RESEND && conn->mtu_probe_seq) { + // It's likely that the probe was rejected due to its size, but we haven't got an + // ICMP report back yet + if (pk_ack_nr == ((conn->mtu_probe_seq - 1) & ACK_NR_MASK)) { + conn->mtu_ceiling = conn->mtu_probe_size - 1; + conn->mtu_search_update(); + conn->log(UTP_LOG_MTU, "MTU [DUPACK] floor:%d ceiling:%d current:%d" + , conn->mtu_floor, conn->mtu_ceiling, conn->mtu_last); + } else { + // A non-probe was blocked before our probe. + // Can't conclude much, send a new probe + conn->mtu_probe_seq = conn->mtu_probe_size = 0; + } + } + } else { + conn->duplicate_ack = 0; + } + + // TODO: if duplicate_ack == DUPLICATE_ACK_BEFORE_RESEND + // and fast_resend_seq_nr <= ack_nr + 1 + // resend ack_nr + 1 + // also call maybe_decay_win() + } + + // figure out how many bytes were acked + size_t acked_bytes = 0; + + // the minimum rtt of all acks + // this is the upper limit on the delay we get back + // from the other peer. Our delay cannot exceed + // the rtt of the packet. If it does, clamp it. 
+ // this is done in apply_ledbat_ccontrol() + int64 min_rtt = INT64_MAX; + + uint64 now = utp_call_get_microseconds(conn->ctx, conn); + + for (int i = 0; i < acks; ++i) { + int seq = (conn->seq_nr - conn->cur_window_packets + i) & ACK_NR_MASK; + OutgoingPacket *pkt = (OutgoingPacket*)conn->outbuf.get(seq); + if (pkt == 0 || pkt->transmissions == 0) continue; + assert((int)(pkt->payload) >= 0); + acked_bytes += pkt->payload; + if (conn->mtu_probe_seq && seq == conn->mtu_probe_seq) { + conn->mtu_floor = conn->mtu_probe_size; + conn->mtu_search_update(); + conn->log(UTP_LOG_MTU, "MTU [ACK] floor:%d ceiling:%d current:%d" + , conn->mtu_floor, conn->mtu_ceiling, conn->mtu_last); + } + + // in case our clock is not monotonic + if (pkt->time_sent < now) + min_rtt = min(min_rtt, now - pkt->time_sent); + else + min_rtt = min(min_rtt, 50000); + } + + // count bytes acked by EACK + if (selack_ptr != NULL) { + acked_bytes += conn->selective_ack_bytes((pk_ack_nr + 2) & ACK_NR_MASK, + selack_ptr, selack_ptr[-1], min_rtt); + } + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "acks:%d acked_bytes:%u seq_nr:%d cur_window:%u cur_window_packets:%u relative_seqnr:%u max_window:%u min_rtt:%u rtt:%u", + acks, (uint)acked_bytes, conn->seq_nr, (uint)conn->cur_window, conn->cur_window_packets, + seqnr, (uint)conn->max_window, (uint)(min_rtt / 1000), conn->rtt); + #endif + + uint64 p = pf1->tv_usec; + + conn->last_measured_delay = conn->ctx->current_ms; + + // get delay in both directions + // record the delay to report back + const uint32 their_delay = (uint32)(p == 0 ? 0 : time - p); + conn->reply_micro = their_delay; + uint32 prev_delay_base = conn->their_hist.delay_base; + if (their_delay != 0) conn->their_hist.add_sample(their_delay, conn->ctx->current_ms); + + // if their new delay base is less than their previous one + // we should shift our delay base in the other direction in order + // to take the clock skew into account + if (prev_delay_base != 0 && + wrapping_compare_less(conn->their_hist.delay_base, prev_delay_base, TIMESTAMP_MASK)) { + // never adjust more than 10 milliseconds + if (prev_delay_base - conn->their_hist.delay_base <= 10000) { + conn->our_hist.shift(prev_delay_base - conn->their_hist.delay_base); + } + } + + const uint32 actual_delay = (uint32(pf1->reply_micro)==INT_MAX?0:uint32(pf1->reply_micro)); + + // if the actual delay is 0, it means the other end + // hasn't received a sample from us yet, and doesn't + // know what it is. We can't update out history unless + // we have a true measured sample + prev_delay_base = conn->our_hist.delay_base; + if (actual_delay != 0) { + conn->our_hist.add_sample(actual_delay, conn->ctx->current_ms); + + // this is keeping an average of the delay samples + // we've recevied within the last 5 seconds. We sum + // all the samples and increase the count in order to + // calculate the average every 5 seconds. The samples + // are based off of the average_delay_base to deal with + // wrapping counters. 
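		// A worked example of the wrap-safe distance trick used just below,
		// assuming 32-bit unsigned microsecond timestamps: with
		// average_delay_base = 0xFFFFFF00 and actual_delay = 0x00000100,
		//
		//   dist_down = 0xFFFFFF00 - 0x00000100 = 0xFFFFFE00   (huge)
		//   dist_up   = 0x00000100 - 0xFFFFFF00 = 0x00000200   (512)
		//
		// dist_down > dist_up, so the sample becomes +512: the new delay is
		// treated as 512 microseconds above the base even though the raw
		// counter wrapped around.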
+ if (conn->average_delay_base == 0) conn->average_delay_base = actual_delay; + int64 average_delay_sample = 0; + // distance walking from lhs to rhs, downwards + const uint32 dist_down = conn->average_delay_base - actual_delay; + // distance walking from lhs to rhs, upwards + const uint32 dist_up = actual_delay - conn->average_delay_base; + + if (dist_down > dist_up) { +// assert(dist_up < INT_MAX / 4); + // average_delay_base < actual_delay, we should end up + // with a positive sample + average_delay_sample = dist_up; + } else { +// assert(-int64(dist_down) < INT_MAX / 4); + // average_delay_base >= actual_delay, we should end up + // with a negative sample + average_delay_sample = -int64(dist_down); + } + conn->current_delay_sum += average_delay_sample; + ++conn->current_delay_samples; + + if (conn->ctx->current_ms > conn->average_sample_time) { + + int32 prev_average_delay = conn->average_delay; + + assert(conn->current_delay_sum / conn->current_delay_samples < INT_MAX); + assert(conn->current_delay_sum / conn->current_delay_samples > -INT_MAX); + // write the new average + conn->average_delay = (int32)(conn->current_delay_sum / conn->current_delay_samples); + // each slot represents 5 seconds + conn->average_sample_time += 5000; + + conn->current_delay_sum = 0; + conn->current_delay_samples = 0; + + // this makes things very confusing when logging the average delay +//#if !g_log_utp + // normalize the average samples + // since we're only interested in the slope + // of the curve formed by the average delay samples, + // we can cancel out the actual offset to make sure + // we won't have problems with wrapping. + int min_sample = min(prev_average_delay, conn->average_delay); + int max_sample = max(prev_average_delay, conn->average_delay); + + // normalize around zero. Try to keep the min <= 0 and max >= 0 + int adjust = 0; + if (min_sample > 0) { + // adjust all samples (and the baseline) down by min_sample + adjust = -min_sample; + } else if (max_sample < 0) { + // adjust all samples (and the baseline) up by -max_sample + adjust = -max_sample; + } + if (adjust) { + conn->average_delay_base -= adjust; + conn->average_delay += adjust; + prev_average_delay += adjust; + } +//#endif + + // update the clock drift estimate + // the unit is microseconds per 5 seconds + // what we're doing is just calculating the average of the + // difference between each slot. Since each slot is 5 seconds + // and the timestamps unit are microseconds, we'll end up with + // the average slope across our history. If there is a consistent + // trend, it will show up in this value + + //int64 slope = 0; + int32 drift = conn->average_delay - prev_average_delay; + + // clock_drift is a rolling average + conn->clock_drift = (int64(conn->clock_drift) * 7 + drift) / 8; + conn->clock_drift_raw = drift; + } + } + + // if our new delay base is less than our previous one + // we should shift the other end's delay base in the other + // direction in order to take the clock skew into account + // This is commented out because it creates bad interactions + // with our adjustment in the other direction. We don't really + // need our estimates of the other peer to be very accurate + // anyway. The problem with shifting here is that we're more + // likely shift it back later because of a low latency. 
This + // second shift back would cause us to shift our delay base + // which then get's into a death spiral of shifting delay bases +/* if (prev_delay_base != 0 && + wrapping_compare_less(conn->our_hist.delay_base, prev_delay_base)) { + // never adjust more than 10 milliseconds + if (prev_delay_base - conn->our_hist.delay_base <= 10000) { + conn->their_hist.Shift(prev_delay_base - conn->our_hist.delay_base); + } + } +*/ + + // if the delay estimate exceeds the RTT, adjust the base_delay to + // compensate + assert(min_rtt >= 0); + if (int64(conn->our_hist.get_value()) > min_rtt) { + conn->our_hist.shift((uint32)(conn->our_hist.get_value() - min_rtt)); + } + + // only apply the congestion controller on acks + // if we don't have a delay measurement, there's + // no point in invoking the congestion control + if (actual_delay != 0 && acked_bytes >= 1) + conn->apply_ccontrol(acked_bytes, actual_delay, min_rtt); + + // sanity check, the other end should never ack packets + // past the point we've sent + if (acks <= conn->cur_window_packets) { + conn->max_window_user = pf1->windowsize; + + // If max user window is set to 0, then we startup a timer + // That will reset it to 1 after 15 seconds. + if (conn->max_window_user == 0) + // Reset max_window_user to 1 every 15 seconds. + conn->zerowindow_time = conn->ctx->current_ms + 15000; + + // Respond to connect message + // Switch to CONNECTED state. + // If this is an ack and we're in still handshaking + // transition over to the connected state. + + // Incoming connection completion + if (pk_flags == ST_DATA && conn->state == CS_SYN_RECV) { + conn->state = CS_CONNECTED; + } + + // Outgoing connection completion + if (pk_flags == ST_STATE && conn->state == CS_SYN_SENT) { + conn->state = CS_CONNECTED; + + // If the user has defined the ON_CONNECT callback, use that to + // notify the user that the socket is now connected. If ON_CONNECT + // has not been defined, notify the user via ON_STATE_CHANGE. + if (conn->ctx->callbacks[UTP_ON_CONNECT]) + utp_call_on_connect(conn->ctx, conn); + else + utp_call_on_state_change(conn->ctx, conn, UTP_STATE_CONNECT); + + // We've sent a fin, and everything was ACKed (including the FIN), + // it's safe to destroy the socket. cur_window_packets == acks + // means that this packet acked all the remaining packets that + // were in-flight. + } else if (conn->state == CS_FIN_SENT && conn->cur_window_packets == acks) { + conn->state = CS_DESTROY; + } + + // Update fast resend counter + if (wrapping_compare_less(conn->fast_resend_seq_nr + , (pk_ack_nr + 1) & ACK_NR_MASK, ACK_NR_MASK)) + conn->fast_resend_seq_nr = (pk_ack_nr + 1) & ACK_NR_MASK; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "fast_resend_seq_nr:%u", conn->fast_resend_seq_nr); + #endif + + for (int i = 0; i < acks; ++i) { + int ack_status = conn->ack_packet(conn->seq_nr - conn->cur_window_packets); + // if ack_status is 0, the packet was acked. + // if acl_stauts is 1, it means that the packet had already been acked + // if it's 2, the packet has not been sent yet + // We need to break this loop in the latter case. 
This could potentially + // happen if we get an ack_nr that does not exceed what we have stuffed + // into the outgoing buffer, but does exceed what we have sent + if (ack_status == 2) { + #ifdef _DEBUG + OutgoingPacket* pkt = (OutgoingPacket*)conn->outbuf.get(conn->seq_nr - conn->cur_window_packets); + assert(pkt->transmissions == 0); + #endif + + break; + } + conn->cur_window_packets--; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "decementing cur_window_packets:%u", conn->cur_window_packets); + #endif + + } + + #ifdef _DEBUG + if (conn->cur_window_packets == 0) + assert(conn->cur_window == 0); + #endif + + // packets in front of this may have been acked by a + // selective ack (EACK). Keep decreasing the window packet size + // until we hit a packet that is still waiting to be acked + // in the send queue + // this is especially likely to happen when the other end + // has the EACK send bug older versions of uTP had + while (conn->cur_window_packets > 0 && !conn->outbuf.get(conn->seq_nr - conn->cur_window_packets)) { + conn->cur_window_packets--; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "decementing cur_window_packets:%u", conn->cur_window_packets); + #endif + + } + + #ifdef _DEBUG + if (conn->cur_window_packets == 0) + assert(conn->cur_window == 0); + #endif + + // this invariant should always be true + assert(conn->cur_window_packets == 0 || conn->outbuf.get(conn->seq_nr - conn->cur_window_packets)); + + // flush Nagle + if (conn->cur_window_packets == 1) { + OutgoingPacket *pkt = (OutgoingPacket*)conn->outbuf.get(conn->seq_nr - 1); + // do we still have quota? + if (pkt->transmissions == 0) { + conn->send_packet(pkt); + } + } + + // Fast timeout-retry + if (conn->fast_timeout) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Fast timeout %u,%u,%u?", (uint)conn->cur_window, conn->seq_nr - conn->timeout_seq_nr, conn->timeout_seq_nr); + #endif + + // if the fast_resend_seq_nr is not pointing to the oldest outstanding packet, it suggests that we've already + // resent the packet that timed out, and we should leave the fast-timeout mode. + if (((conn->seq_nr - conn->cur_window_packets) & ACK_NR_MASK) != conn->fast_resend_seq_nr) { + conn->fast_timeout = false; + } else { + // resend the oldest packet and increment fast_resend_seq_nr + // to not allow another fast resend on it again + OutgoingPacket *pkt = (OutgoingPacket*)conn->outbuf.get(conn->seq_nr - conn->cur_window_packets); + if (pkt && pkt->transmissions > 0) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Packet %u fast timeout-retry.", conn->seq_nr - conn->cur_window_packets); + #endif + + #ifdef _DEBUG + ++conn->_stats.fastrexmit; + #endif + + conn->fast_resend_seq_nr++; + conn->send_packet(pkt); + } + } + } + } + + // Process selective acknowledgent + if (selack_ptr != NULL) { + conn->selective_ack(pk_ack_nr + 2, selack_ptr, selack_ptr[-1]); + } + + // this invariant should always be true + assert(conn->cur_window_packets == 0 || conn->outbuf.get(conn->seq_nr - conn->cur_window_packets)); + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "acks:%d acked_bytes:%u seq_nr:%u cur_window:%u cur_window_packets:%u ", + acks, (uint)acked_bytes, conn->seq_nr, (uint)conn->cur_window, conn->cur_window_packets); + #endif + + // In case the ack dropped the current window below + // the max_window size, Mark the socket as writable + if (conn->state == CS_CONNECTED_FULL && !conn->is_full()) { + conn->state = CS_CONNECTED; + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Socket writable. 
max_window:%u cur_window:%u packet_size:%u", + (uint)conn->max_window, (uint)conn->cur_window, (uint)conn->get_packet_size()); + #endif + utp_call_on_state_change(conn->ctx, conn, UTP_STATE_WRITABLE); + } + + if (pk_flags == ST_STATE) { + // This is a state packet only. + return 0; + } + + // The connection is not in a state that can accept data? + if (conn->state != CS_CONNECTED && + conn->state != CS_CONNECTED_FULL && + conn->state != CS_FIN_SENT) { + return 0; + } + + // Is this a finalize packet? + if (pk_flags == ST_FIN && !conn->got_fin) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Got FIN eof_pkt:%u", pk_seq_nr); + #endif + + conn->got_fin = true; + conn->eof_pkt = pk_seq_nr; + // at this point, it is possible for the + // other end to have sent packets with + // sequence numbers higher than seq_nr. + // if this is the case, our reorder_count + // is out of sync. This case is dealt with + // when we re-order and hit the eof_pkt. + // we'll just ignore any packets with + // sequence numbers past this + } + + // Getting an in-order packet? + if (seqnr == 0) { + size_t count = packet_end - data; + if (count > 0 && conn->state != CS_FIN_SENT) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Got Data len:%u (rb:%u)", (uint)count, (uint)utp_call_get_read_buffer_size(conn->ctx, conn)); + #endif + + // Post bytes to the upper layer + utp_call_on_read(conn->ctx, conn, data, count); + } + conn->ack_nr++; + + // Check if the next packet has been received too, but waiting + // in the reorder buffer. + for (;;) { + + if (conn->got_fin && conn->eof_pkt == conn->ack_nr) { + if (conn->state != CS_FIN_SENT) { + conn->state = CS_GOT_FIN; + conn->rto_timeout = conn->ctx->current_ms + min(conn->rto * 3, 60); + + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Posting EOF"); + #endif + + utp_call_on_state_change(conn->ctx, conn, UTP_STATE_EOF); + } + + // if the other end wants to close, ack + conn->send_ack(); + + // reorder_count is not necessarily 0 at this point. + // even though it is most of the time, the other end + // may have sent packets with higher sequence numbers + // than what later end up being eof_pkt + // since we have received all packets up to eof_pkt + // just ignore the ones after it. + conn->reorder_count = 0; + } + + // Quick get-out in case there is nothing to reorder + if (conn->reorder_count == 0) + break; + + // Check if there are additional buffers in the reorder buffers + // that need delivery. + byte *p = (byte*)conn->inbuf.get(conn->ack_nr+1); + if (p == NULL) + break; + conn->inbuf.put(conn->ack_nr+1, NULL); + count = *(uint*)p; + if (count > 0 && conn->state != CS_FIN_SENT) { + // Pass the bytes to the upper layer + utp_call_on_read(conn->ctx, conn, p + sizeof(uint), count); + } + conn->ack_nr++; + + // Free the element from the reorder buffer + free(p); + assert(conn->reorder_count > 0); + conn->reorder_count--; + } + + conn->schedule_ack(); + } else { + // Getting an out of order packet. + // The packet needs to be remembered and rearranged later. + + // if we have received a FIN packet, and the EOF-sequence number + // is lower than the sequence number of the packet we just received + // something is wrong. 
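		// A small worked example of the bookkeeping below, assuming
		// conn->ack_nr == 100: if packets 102 and 103 arrive first, their
		// relative seqnr values are 1 and 2, they are parked in inbuf at
		// indices (102 & mask) and (103 & mask), and reorder_count becomes 2.
		// When packet 101 finally arrives, the in-order path above delivers it
		// and then drains 102 and 103 from inbuf in its reorder loop.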
+ if (conn->got_fin && pk_seq_nr > conn->eof_pkt) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Got an invalid packet sequence number, past EOF " + "reorder_count:%u len:%u (rb:%u)", + conn->reorder_count, (uint)(packet_end - data), (uint)utp_call_get_read_buffer_size(conn->ctx, conn)); + #endif + return 0; + } + + // if the sequence number is entirely off the expected + // one, just drop it. We can't allocate buffer space in + // the inbuf entirely based on untrusted input + if (seqnr > 0x3ff) { + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "0x%08x: Got an invalid packet sequence number, too far off " + "reorder_count:%u len:%u (rb:%u)", + conn->reorder_count, (uint)(packet_end - data), (uint)utp_call_get_read_buffer_size(conn->ctx, conn)); + #endif + return 0; + } + + // we need to grow the circle buffer before we + // check if the packet is already in here, so that + // we don't end up looking at an older packet (since + // the indices wraps around). + conn->inbuf.ensure_size(pk_seq_nr + 1, seqnr + 1); + + // Has this packet already been received? (i.e. a duplicate) + // If that is the case, just discard it. + if (conn->inbuf.get(pk_seq_nr) != NULL) { + #ifdef _DEBUG + ++conn->_stats.nduprecv; + #endif + + return 0; + } + + // Allocate memory to fit the packet that needs to re-ordered + byte *mem = (byte*)malloc((packet_end - data) + sizeof(uint)); + *(uint*)mem = (uint)(packet_end - data); + memcpy(mem + sizeof(uint), data, packet_end - data); + + // Insert into reorder buffer and increment the count + // of # of packets to be reordered. + // we add one to seqnr in order to leave the last + // entry empty, that way the assert in send_ack + // is valid. we have to add one to seqnr too, in order + // to make the circular buffer grow around the correct + // point (which is conn->ack_nr + 1). + assert(conn->inbuf.get(pk_seq_nr) == NULL); + assert((pk_seq_nr & conn->inbuf.mask) != ((conn->ack_nr+1) & conn->inbuf.mask)); + conn->inbuf.put(pk_seq_nr, mem); + conn->reorder_count++; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "0x%08x: Got out of order data reorder_count:%u len:%u (rb:%u)", + conn->reorder_count, (uint)(packet_end - data), (uint)utp_call_get_read_buffer_size(conn->ctx, conn)); + #endif + + conn->schedule_ack(); + } + + return (size_t)(packet_end - data); +} + +inline byte UTP_Version(PacketFormatV1 const* pf) +{ + return (pf->type() < ST_NUM_STATES && pf->ext < 3 ? pf->version() : 0); +} + +UTPSocket::~UTPSocket() +{ + #if UTP_DEBUG_LOGGING + log(UTP_LOG_DEBUG, "Killing socket"); + #endif + + utp_call_on_state_change(ctx, this, UTP_STATE_DESTROYING); + + if (ctx->last_utp_socket == this) { + ctx->last_utp_socket = NULL; + } + + // Remove object from the global hash table + UTPSocketKeyData* kd = ctx->utp_sockets->Delete(UTPSocketKey(addr, conn_id_recv)); + assert(kd); + + // remove the socket from ack_sockets if it was there also + removeSocketFromAckList(this); + + // Free all memory occupied by the socket object. 
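	// Both circular buffers are sized to a power of two and keep (capacity - 1)
	// in 'mask', so indices elsewhere are wrapped with '& mask' (see the inbuf
	// asserts in utp_process_incoming above). The loops below therefore visit
	// every slot, e.g. mask == 15 means slots 0..15, freeing any packets still
	// parked in the reorder and outgoing buffers.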
+ for (size_t i = 0; i <= inbuf.mask; i++) { + free(inbuf.elements[i]); + } + for (size_t i = 0; i <= outbuf.mask; i++) { + free(outbuf.elements[i]); + } + // TODO: The circular buffer should have a destructor + free(inbuf.elements); + free(outbuf.elements); +} + +void UTP_FreeAll(struct UTPSocketHT *utp_sockets) { + utp_hash_iterator_t it; + UTPSocketKeyData* keyData; + while ((keyData = utp_sockets->Iterate(it))) { + delete keyData->socket; + } +} + +void utp_initialize_socket( utp_socket *conn, + const struct sockaddr *addr, + socklen_t addrlen, + bool need_seed_gen, + uint32 conn_seed, + uint32 conn_id_recv, + uint32 conn_id_send) +{ + PackedSockAddr psaddr = PackedSockAddr((const SOCKADDR_STORAGE*)addr, addrlen); + + if (need_seed_gen) { + do { + conn_seed = utp_call_get_random(conn->ctx, conn); + // we identify v1 and higher by setting the first two bytes to 0x0001 + conn_seed &= 0xffff; + } while (conn->ctx->utp_sockets->Lookup(UTPSocketKey(psaddr, conn_seed))); + + conn_id_recv += conn_seed; + conn_id_send += conn_seed; + } + + conn->state = CS_IDLE; + conn->conn_seed = conn_seed; + conn->conn_id_recv = conn_id_recv; + conn->conn_id_send = conn_id_send; + conn->addr = psaddr; + conn->ctx->current_ms = utp_call_get_milliseconds(conn->ctx, NULL); + conn->last_got_packet = conn->ctx->current_ms; + conn->last_sent_packet = conn->ctx->current_ms; + conn->last_measured_delay = conn->ctx->current_ms + 0x70000000; + conn->average_sample_time = conn->ctx->current_ms + 5000; + conn->last_rwin_decay = conn->ctx->current_ms - MAX_WINDOW_DECAY; + + conn->our_hist.clear(conn->ctx->current_ms); + conn->their_hist.clear(conn->ctx->current_ms); + conn->rtt_hist.clear(conn->ctx->current_ms); + + // initialize MTU floor and ceiling + conn->mtu_reset(); + conn->mtu_last = conn->mtu_ceiling; + + conn->ctx->utp_sockets->Add(UTPSocketKey(conn->addr, conn->conn_id_recv))->socket = conn; + + // we need to fit one packet in the window when we start the connection + conn->max_window = conn->get_packet_size(); + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "UTP socket initialized"); + #endif +} + +utp_socket* utp_create_socket(utp_context *ctx) +{ + assert(ctx); + if (!ctx) return NULL; + + UTPSocket *conn = new UTPSocket; // TODO: UTPSocket should have a constructor + + conn->state = CS_UNINITIALIZED; + conn->ctx = ctx; + conn->userdata = NULL; + conn->reorder_count = 0; + conn->duplicate_ack = 0; + conn->timeout_seq_nr = 0; + conn->last_rcv_win = 0; + conn->got_fin = false; + conn->fast_timeout = false; + conn->rtt = 0; + conn->retransmit_timeout = 0; + conn->rto_timeout = 0; + conn->zerowindow_time = 0; + conn->average_delay = 0; + conn->current_delay_samples = 0; + conn->cur_window = 0; + conn->eof_pkt = 0; + conn->last_maxed_out_window = 0; + conn->mtu_probe_seq = 0; + conn->mtu_probe_size = 0; + conn->current_delay_sum = 0; + conn->average_delay_base = 0; + conn->retransmit_count = 0; + conn->rto = 3000; + conn->rtt_var = 800; + conn->seq_nr = 1; + conn->ack_nr = 0; + conn->max_window_user = 255 * PACKET_SIZE; + conn->cur_window_packets = 0; + conn->fast_resend_seq_nr = conn->seq_nr; + conn->target_delay = ctx->target_delay; + conn->reply_micro = 0; + conn->opt_sndbuf = ctx->opt_sndbuf; + conn->opt_rcvbuf = ctx->opt_rcvbuf; + conn->slow_start = true; + conn->ssthresh = conn->opt_sndbuf; + conn->clock_drift = 0; + conn->clock_drift_raw = 0; + conn->outbuf.mask = 15; + conn->inbuf.mask = 15; + conn->outbuf.elements = (void**)calloc(16, sizeof(void*)); + conn->inbuf.elements = (void**)calloc(16, 
sizeof(void*)); + conn->ida = -1; // set the index of every new socket in ack_sockets to + // -1, which also means it is not in ack_sockets yet + + memset(conn->extensions, 0, sizeof(conn->extensions)); + + #ifdef _DEBUG + memset(&conn->_stats, 0, sizeof(utp_socket_stats)); + #endif + + return conn; +} + +int utp_context_set_option(utp_context *ctx, int opt, int val) +{ + assert(ctx); + if (!ctx) return -1; + + switch (opt) { + case UTP_LOG_NORMAL: + ctx->log_normal = val ? true : false; + return 0; + + case UTP_LOG_MTU: + ctx->log_mtu = val ? true : false; + return 0; + + case UTP_LOG_DEBUG: + ctx->log_debug = val ? true : false; + return 0; + + case UTP_TARGET_DELAY: + ctx->target_delay = val; + return 0; + + case UTP_SNDBUF: + assert(val >= 1); + ctx->opt_sndbuf = val; + return 0; + + case UTP_RCVBUF: + assert(val >= 1); + ctx->opt_rcvbuf = val; + return 0; + } + return -1; +} + +int utp_context_get_option(utp_context *ctx, int opt) +{ + assert(ctx); + if (!ctx) return -1; + + switch (opt) { + case UTP_LOG_NORMAL: return ctx->log_normal ? 1 : 0; + case UTP_LOG_MTU: return ctx->log_mtu ? 1 : 0; + case UTP_LOG_DEBUG: return ctx->log_debug ? 1 : 0; + case UTP_TARGET_DELAY: return ctx->target_delay; + case UTP_SNDBUF: return ctx->opt_sndbuf; + case UTP_RCVBUF: return ctx->opt_rcvbuf; + } + return -1; +} + + +int utp_setsockopt(UTPSocket* conn, int opt, int val) +{ + assert(conn); + if (!conn) return -1; + + switch (opt) { + + case UTP_SNDBUF: + assert(val >= 1); + conn->opt_sndbuf = val; + return 0; + + case UTP_RCVBUF: + assert(val >= 1); + conn->opt_rcvbuf = val; + return 0; + + case UTP_TARGET_DELAY: + conn->target_delay = val; + return 0; + } + + return -1; +} + +int utp_getsockopt(UTPSocket* conn, int opt) +{ + assert(conn); + if (!conn) return -1; + + switch (opt) { + case UTP_SNDBUF: return conn->opt_sndbuf; + case UTP_RCVBUF: return conn->opt_rcvbuf; + case UTP_TARGET_DELAY: return conn->target_delay; + } + + return -1; +} + +// Try to connect to a specified host. +int utp_connect(utp_socket *conn, const struct sockaddr *to, socklen_t tolen) +{ + assert(conn); + if (!conn) return -1; + + assert(conn->state == CS_UNINITIALIZED); + if (conn->state != CS_UNINITIALIZED) { + conn->state = CS_DESTROY; + return -1; + } + + utp_initialize_socket(conn, to, tolen, true, 0, 0, 1); + + assert(conn->cur_window_packets == 0); + assert(conn->outbuf.get(conn->seq_nr) == NULL); + assert(sizeof(PacketFormatV1) == 20); + + conn->state = CS_SYN_SENT; + conn->ctx->current_ms = utp_call_get_milliseconds(conn->ctx, conn); + + // Create and send a connect message + + // used in parse_log.py + conn->log(UTP_LOG_NORMAL, "UTP_Connect conn_seed:%u packet_size:%u (B) " + "target_delay:%u (ms) delay_history:%u " + "delay_base_history:%u (minutes)", + conn->conn_seed, PACKET_SIZE, conn->target_delay / 1000, + CUR_DELAY_SIZE, DELAY_BASE_HISTORY); + + // Setup initial timeout timer. + conn->retransmit_timeout = 3000; + conn->rto_timeout = conn->ctx->current_ms + conn->retransmit_timeout; + conn->last_rcv_win = conn->get_rcv_window(); + + // if you need compatibiltiy with 1.8.1, use this. it increases attackability though. + //conn->seq_nr = 1; + conn->seq_nr = utp_call_get_random(conn->ctx, conn); + + // Create the connect packet. 
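	// For reference, the 20-byte uTP v1 header filled in below (per BEP 29),
	// using the PacketFormatV1 field names from this file; all multi-byte
	// fields are big endian on the wire:
	//
	//   byte  0      type (high nibble) | version (low nibble), e.g. 0x41 = ST_SYN, v1
	//   byte  1      ext         first extension type, 0 = none
	//   bytes 2-3    connid      connection id (the receive id for a SYN)
	//   bytes 4-7    tv_usec     sender timestamp, microseconds
	//   bytes 8-11   reply_micro timestamp difference echoed back to the peer
	//   bytes 12-15  windowsize  advertised receive window, bytes
	//   bytes 16-17  seq_nr
	//   bytes 18-19  ack_nr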
+ const size_t header_size = sizeof(PacketFormatV1); + + OutgoingPacket *pkt = (OutgoingPacket*)malloc(sizeof(OutgoingPacket) - 1 + header_size); + PacketFormatV1* p1 = (PacketFormatV1*)pkt->data; + + memset(p1, 0, header_size); + // SYN packets are special, and have the receive ID in the connid field, + // instead of conn_id_send. + p1->set_version(1); + p1->set_type(ST_SYN); + p1->ext = 0; + p1->connid = conn->conn_id_recv; + p1->windowsize = (uint32)conn->last_rcv_win; + p1->seq_nr = conn->seq_nr; + pkt->transmissions = 0; + pkt->length = header_size; + pkt->payload = 0; + + /* + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Sending connect %s [%u].", + addrfmt(conn->addr, addrbuf), conn_seed); + #endif + */ + + // Remember the message in the outgoing queue. + conn->outbuf.ensure_size(conn->seq_nr, conn->cur_window_packets); + conn->outbuf.put(conn->seq_nr, pkt); + conn->seq_nr++; + conn->cur_window_packets++; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "incrementing cur_window_packets:%u", conn->cur_window_packets); + #endif + + conn->send_packet(pkt); + return 0; +} + +// Returns 1 if the UDP payload was recognized as a UTP packet, or 0 if it was not +int utp_process_udp(utp_context *ctx, const byte *buffer, size_t len, const struct sockaddr *to, socklen_t tolen) +{ + assert(ctx); + if (!ctx) return 0; + + assert(buffer); + if (!buffer) return 0; + + assert(to); + if (!to) return 0; + + const PackedSockAddr addr((const SOCKADDR_STORAGE*)to, tolen); + + if (len < sizeof(PacketFormatV1)) { + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv %s len:%u too small", addrfmt(addr, addrbuf), (uint)len); + #endif + return 0; + } + + const PacketFormatV1 *pf1 = (PacketFormatV1*)buffer; + const byte version = UTP_Version(pf1); + const uint32 id = uint32(pf1->connid); + + if (version != 1) { + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv %s len:%u version:%u unsupported version", addrfmt(addr, addrbuf), (uint)len, version); + #endif + + return 0; + } + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv %s len:%u id:%u", addrfmt(addr, addrbuf), (uint)len, id); + ctx->log(UTP_LOG_DEBUG, NULL, "recv id:%u seq_nr:%u ack_nr:%u", id, (uint)pf1->seq_nr, (uint)pf1->ack_nr); + #endif + + const byte flags = pf1->type(); + + if (flags == ST_RESET) { + // id is either our recv id or our send id + // if it's our send id, and we initiated the connection, our recv id is id + 1 + // if it's our send id, and we did not initiate the connection, our recv id is id - 1 + // we have to check every case + + UTPSocketKeyData* keyData; + if ( (keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id))) || + ((keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id + 1))) && keyData->socket->conn_id_send == id) || + ((keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id - 1))) && keyData->socket->conn_id_send == id)) + { + UTPSocket* conn = keyData->socket; + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv RST for existing connection"); + #endif + + if (conn->state == CS_FIN_SENT) + conn->state = CS_DESTROY; + else + conn->state = CS_RESET; + + utp_call_on_overhead_statistics(conn->ctx, conn, false, len + conn->get_udp_overhead(), close_overhead); + const int err = (conn->state == CS_SYN_SENT) ? 
UTP_ECONNREFUSED : UTP_ECONNRESET; + utp_call_on_error(conn->ctx, conn, err); + } + else { + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv RST for unknown connection"); + #endif + } + return 1; + } + else if (flags != ST_SYN) { + UTPSocket* conn = NULL; + + if (ctx->last_utp_socket && ctx->last_utp_socket->addr == addr && ctx->last_utp_socket->conn_id_recv == id) { + conn = ctx->last_utp_socket; + } else { + UTPSocketKeyData* keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id)); + if (keyData) { + conn = keyData->socket; + ctx->last_utp_socket = conn; + } + } + + if (conn) { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv processing"); + #endif + + const size_t read = utp_process_incoming(conn, buffer, len); + utp_call_on_overhead_statistics(conn->ctx, conn, false, (len - read) + conn->get_udp_overhead(), header_overhead); + return 1; + } + } + + // We have not found a matching utp_socket, and this isn't a SYN. Reject it. + const uint32 seq_nr = pf1->seq_nr; + if (flags != ST_SYN) { + ctx->current_ms = utp_call_get_milliseconds(ctx, NULL); + + for (size_t i = 0; i < ctx->rst_info.GetCount(); i++) { + if ((ctx->rst_info[i].connid == id) && + (ctx->rst_info[i].addr == addr) && + (ctx->rst_info[i].ack_nr == seq_nr)) + { + ctx->rst_info[i].timestamp = ctx->current_ms; + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv not sending RST to non-SYN (stored)"); + #endif + + return 1; + } + } + + if (ctx->rst_info.GetCount() > RST_INFO_LIMIT) { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv not sending RST to non-SYN (limit at %u stored)", (uint)ctx->rst_info.GetCount()); + #endif + + return 1; + } + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv send RST to non-SYN (%u stored)", (uint)ctx->rst_info.GetCount()); + #endif + + RST_Info &r = ctx->rst_info.Append(); + r.addr = addr; + r.connid = id; + r.ack_nr = seq_nr; + r.timestamp = ctx->current_ms; + + UTPSocket::send_rst(ctx, addr, id, seq_nr, utp_call_get_random(ctx, NULL)); + return 1; + } + + if (ctx->callbacks[UTP_ON_ACCEPT]) { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "Incoming connection from %s", addrfmt(addr, addrbuf)); + #endif + + UTPSocketKeyData* keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id + 1)); + if (keyData) { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "rejected incoming connection, connection already exists"); + #endif + + return 1; + } + + if (ctx->utp_sockets->GetCount() > 3000) { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "rejected incoming connection, too many uTP sockets %d", ctx->utp_sockets->GetCount()); + #endif + + return 1; + } + // true means yes, block connection. false means no, don't block. 
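		// A minimal sketch of the user-side half of this hook, assuming the
		// utp_set_callback()/utp_callback_arguments API declared in utp.h:
		// return non-zero to block the incoming connection, zero to accept it.
		//
		//   static uint64 my_firewall(utp_callback_arguments *a)
		//   {
		//       // illustrative policy only: accept IPv4 peers, block the rest
		//       return a->address->sa_family != AF_INET;
		//   }
		//   ...
		//   utp_set_callback(ctx, UTP_ON_FIREWALL, &my_firewall);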
+ if (utp_call_on_firewall(ctx, to, tolen)) { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "rejected incoming connection, firewall callback returned true"); + #endif + + return 1; + } + + // Create a new UTP socket to handle this new connection + UTPSocket *conn = utp_create_socket(ctx); + utp_initialize_socket(conn, to, tolen, false, id, id+1, id); + conn->ack_nr = seq_nr; + conn->seq_nr = utp_call_get_random(ctx, NULL); + conn->fast_resend_seq_nr = conn->seq_nr; + conn->state = CS_SYN_RECV; + + const size_t read = utp_process_incoming(conn, buffer, len, true); + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "recv send connect ACK"); + #endif + + conn->send_ack(true); + + utp_call_on_accept(ctx, conn, to, tolen); + + // we report overhead after on_accept(), because the callbacks are setup now + utp_call_on_overhead_statistics(conn->ctx, conn, false, (len - read) + conn->get_udp_overhead(), header_overhead); // SYN + utp_call_on_overhead_statistics(conn->ctx, conn, true, conn->get_overhead(), ack_overhead); // SYNACK + } + else { + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "rejected incoming connection, UTP_ON_ACCEPT callback not set"); + #endif + + } + + return 1; +} + +// Called by utp_process_icmp_fragmentation() and utp_process_icmp_error() below +static UTPSocket* parse_icmp_payload(utp_context *ctx, const byte *buffer, size_t len, const struct sockaddr *to, socklen_t tolen) +{ + assert(ctx); + if (!ctx) return NULL; + + assert(buffer); + if (!buffer) return NULL; + + assert(to); + if (!to) return NULL; + + const PackedSockAddr addr((const SOCKADDR_STORAGE*)to, tolen); + + // ICMP packets are only required to quote the first 8 bytes of the layer4 + // payload. The UDP payload is 8 bytes, and the UTP header is another 20 + // bytes. So, in order to find the entire UTP header, we need the ICMP + // packet to quote 28 bytes. + if (len < sizeof(PacketFormatV1)) { + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "Ignoring ICMP from %s: runt length %d", addrfmt(addr, addrbuf), len); + #endif + return NULL; + } + + const PacketFormatV1 *pf = (PacketFormatV1*)buffer; + const byte version = UTP_Version(pf); + const uint32 id = uint32(pf->connid); + + if (version != 1) { + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "Ignoring ICMP from %s: not UTP version 1", addrfmt(addr, addrbuf)); + #endif + return NULL; + } + + UTPSocketKeyData* keyData; + + if ( (keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id))) || + ((keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id + 1))) && keyData->socket->conn_id_send == id) || + ((keyData = ctx->utp_sockets->Lookup(UTPSocketKey(addr, id - 1))) && keyData->socket->conn_id_send == id)) + { + return keyData->socket; + } + + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "Ignoring ICMP from %s: No matching connection found for id %u", addrfmt(addr, addrbuf), id); + #endif + return NULL; +} + +// Should be called when an ICMP Type 3, Code 4 packet (fragmentation needed) is received, to adjust the MTU +// +// Returns 1 if the UDP payload (delivered in the ICMP packet) was recognized as a UTP packet, or 0 if it was not +// +// @ctx: utp_context +// @buf: Contents of the original UDP payload, which the ICMP packet quoted. *Not* the ICMP packet itself. 
+// @len: buffer length +// @to: destination address of the original UDP pakcet +// @tolen: address length +// @next_hop_mtu: +int utp_process_icmp_fragmentation(utp_context *ctx, const byte* buffer, size_t len, const struct sockaddr *to, socklen_t tolen, uint16 next_hop_mtu) +{ + UTPSocket* conn = parse_icmp_payload(ctx, buffer, len, to, tolen); + if (!conn) return 0; + + // Constrain the next_hop_mtu to sane values. It might not be initialized or sent properly + if (next_hop_mtu >= 576 && next_hop_mtu < 0x2000) { + conn->mtu_ceiling = min(next_hop_mtu, conn->mtu_ceiling); + conn->mtu_search_update(); + // this is something of a speecial case, where we don't set mtu_last + // to the value in between the floor and the ceiling. We can update the + // floor, because there might be more network segments after the one + // that sent this ICMP with smaller MTUs. But we want to test this + // MTU size first. If the next probe gets through, mtu_floor is updated + conn->mtu_last = conn->mtu_ceiling; + } else { + // Otherwise, binary search. At this point we don't actually know + // what size the packet that failed was, and apparently we can't + // trust the next hop mtu either. It seems reasonably conservative + // to just lower the ceiling. This should not happen on working networks + // anyway. + conn->mtu_ceiling = (conn->mtu_floor + conn->mtu_ceiling) / 2; + conn->mtu_search_update(); + } + + conn->log(UTP_LOG_MTU, "MTU [ICMP] floor:%d ceiling:%d current:%d", conn->mtu_floor, conn->mtu_ceiling, conn->mtu_last); + return 1; +} + +// Should be called when an ICMP message is received that should tear down the connection. +// +// Returns 1 if the UDP payload (delivered in the ICMP packet) was recognized as a UTP packet, or 0 if it was not +// +// @ctx: utp_context +// @buf: Contents of the original UDP payload, which the ICMP packet quoted. *Not* the ICMP packet itself. +// @len: buffer length +// @to: destination address of the original UDP pakcet +// @tolen: address length +int utp_process_icmp_error(utp_context *ctx, const byte *buffer, size_t len, const struct sockaddr *to, socklen_t tolen) +{ + UTPSocket* conn = parse_icmp_payload(ctx, buffer, len, to, tolen); + if (!conn) return 0; + + const int err = (conn->state == CS_SYN_SENT) ? UTP_ECONNREFUSED : UTP_ECONNRESET; + const PackedSockAddr addr((const SOCKADDR_STORAGE*)to, tolen); + + switch(conn->state) { + // Don't pass on errors for idle/closed connections + case CS_IDLE: + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "ICMP from %s in state CS_IDLE, ignoring", addrfmt(addr, addrbuf)); + #endif + return 1; + + case CS_FIN_SENT: + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "ICMP from %s in state CS_FIN_SENT, setting state to CS_DESTROY and causing error %d", addrfmt(addr, addrbuf), err); + #endif + conn->state = CS_DESTROY; + break; + + default: + #if UTP_DEBUG_LOGGING + ctx->log(UTP_LOG_DEBUG, NULL, "ICMP from %s, setting state to CS_RESET and causing error %d", addrfmt(addr, addrbuf), err); + #endif + conn->state = CS_RESET; + break; + } + + utp_call_on_error(conn->ctx, conn, err); + return 1; +} + +// Write bytes to the UTP socket. Returns the number of bytes written. 
+// 0 indicates the socket is no longer writable, -1 indicates an error +ssize_t utp_writev(utp_socket *conn, struct utp_iovec *iovec_input, size_t num_iovecs) +{ + static utp_iovec iovec[UTP_IOV_MAX]; + + assert(conn); + if (!conn) return -1; + + assert(iovec_input); + if (!iovec_input) return -1; + + assert(num_iovecs); + if (!num_iovecs) return -1; + + if (num_iovecs > UTP_IOV_MAX) + num_iovecs = UTP_IOV_MAX; + + memcpy(iovec, iovec_input, sizeof(struct utp_iovec)*num_iovecs); + + size_t bytes = 0; + size_t sent = 0; + for (size_t i = 0; i < num_iovecs; i++) + bytes += iovec[i].iov_len; + + #if UTP_DEBUG_LOGGING + size_t param = bytes; + #endif + + if (conn->state != CS_CONNECTED) { + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "UTP_Write %u bytes = false (not CS_CONNECTED)", (uint)bytes); + #endif + return 0; + } + + conn->ctx->current_ms = utp_call_get_milliseconds(conn->ctx, conn); + + // don't send unless it will all fit in the window + size_t packet_size = conn->get_packet_size(); + size_t num_to_send = min(bytes, packet_size); + while (!conn->is_full(num_to_send)) { + // Send an outgoing packet. + // Also add it to the outgoing of packets that have been sent but not ACKed. + + bytes -= num_to_send; + sent += num_to_send; + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Sending packet. seq_nr:%u ack_nr:%u wnd:%u/%u/%u rcv_win:%u size:%u cur_window_packets:%u", + conn->seq_nr, conn->ack_nr, + (uint)(conn->cur_window + num_to_send), + (uint)conn->max_window, (uint)conn->max_window_user, + (uint)conn->last_rcv_win, num_to_send, + conn->cur_window_packets); + #endif + conn->write_outgoing_packet(num_to_send, ST_DATA, iovec, num_iovecs); + num_to_send = min(bytes, packet_size); + + if (num_to_send == 0) { + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "UTP_Write %u bytes = true", (uint)param); + #endif + return sent; + } + } + + bool full = conn->is_full(); + if (full) { + // mark the socket as not being writable. + conn->state = CS_CONNECTED_FULL; + } + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "UTP_Write %u bytes = %s", (uint)bytes, full ? 
"false" : "true"); + #endif + + // returns whether or not the socket is still writable + // if the congestion window is not full, we can still write to it + //return !full; + return sent; +} + +void utp_read_drained(utp_socket *conn) +{ + assert(conn); + if (!conn) return; + + assert(conn->state != CS_UNINITIALIZED); + if (conn->state == CS_UNINITIALIZED) return; + + const size_t rcvwin = conn->get_rcv_window(); + + if (rcvwin > conn->last_rcv_win) { + // If last window was 0 send ACK immediately, otherwise should set timer + if (conn->last_rcv_win == 0) { + conn->send_ack(); + } else { + conn->ctx->current_ms = utp_call_get_milliseconds(conn->ctx, conn); + conn->schedule_ack(); + } + } +} + +// Should be called each time the UDP socket is drained +void utp_issue_deferred_acks(utp_context *ctx) +{ + assert(ctx); + if (!ctx) return; + + for (size_t i = 0; i < ctx->ack_sockets.GetCount(); i++) { + UTPSocket *conn = ctx->ack_sockets[i]; + conn->send_ack(); + i--; + } +} + +// Should be called every 500ms +void utp_check_timeouts(utp_context *ctx) +{ + assert(ctx); + if (!ctx) return; + + ctx->current_ms = utp_call_get_milliseconds(ctx, NULL); + + if (ctx->current_ms - ctx->last_check < TIMEOUT_CHECK_INTERVAL) + return; + + ctx->last_check = ctx->current_ms; + + for (size_t i = 0; i < ctx->rst_info.GetCount(); i++) { + if ((int)(ctx->current_ms - ctx->rst_info[i].timestamp) >= RST_INFO_TIMEOUT) { + ctx->rst_info.MoveUpLast(i); + i--; + } + } + if (ctx->rst_info.GetCount() != ctx->rst_info.GetAlloc()) { + ctx->rst_info.Compact(); + } + + utp_hash_iterator_t it; + UTPSocketKeyData* keyData; + while ((keyData = ctx->utp_sockets->Iterate(it))) { + UTPSocket *conn = keyData->socket; + conn->check_timeouts(); + + // Check if the object was deleted + if (conn->state == CS_DESTROY) { + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "Destroying"); + #endif + delete conn; + } + } +} + +int utp_getpeername(utp_socket *conn, struct sockaddr *addr, socklen_t *addrlen) +{ + assert(addr); + if (!addr) return -1; + + assert(addrlen); + if (!addrlen) return -1; + + assert(conn); + if (!conn) return -1; + + assert(conn->state != CS_UNINITIALIZED); + if (conn->state == CS_UNINITIALIZED) return -1; + + socklen_t len; + const SOCKADDR_STORAGE sa = conn->addr.get_sockaddr_storage(&len); + *addrlen = min(len, *addrlen); + memcpy(addr, &sa, *addrlen); + return 0; +} + +int utp_get_delays(UTPSocket *conn, uint32 *ours, uint32 *theirs, uint32 *age) +{ + assert(conn); + if (!conn) return -1; + + assert(conn->state != CS_UNINITIALIZED); + if (conn->state == CS_UNINITIALIZED) { + if (ours) *ours = 0; + if (theirs) *theirs = 0; + if (age) *age = 0; + return -1; + } + + if (ours) *ours = conn->our_hist.get_value(); + if (theirs) *theirs = conn->their_hist.get_value(); + if (age) *age = (uint32)(conn->ctx->current_ms - conn->last_measured_delay); + return 0; +} + +// Close the UTP socket. +// It is not valid for the upper layer to refer to socket after it is closed. +// Data will keep to try being delivered after the close. 
+void utp_close(UTPSocket *conn) +{ + assert(conn); + if (!conn) return; + + assert(conn->state != CS_UNINITIALIZED + && conn->state != CS_DESTROY_DELAY + && conn->state != CS_FIN_SENT + && conn->state != CS_DESTROY); + + #if UTP_DEBUG_LOGGING + conn->log(UTP_LOG_DEBUG, "UTP_Close in state:%s", statenames[conn->state]); + #endif + + switch(conn->state) { + case CS_CONNECTED: + case CS_CONNECTED_FULL: + conn->state = CS_FIN_SENT; + conn->write_outgoing_packet(0, ST_FIN, NULL, 0); + break; + + case CS_SYN_SENT: + conn->rto_timeout = utp_call_get_milliseconds(conn->ctx, conn) + min(conn->rto * 2, 60); + // fall through + case CS_GOT_FIN: + conn->state = CS_DESTROY_DELAY; + break; + case CS_SYN_RECV: + // fall through + default: + conn->state = CS_DESTROY; + break; + } +} + +utp_context* utp_get_context(utp_socket *socket) { + assert(socket); + return socket ? socket->ctx : NULL; +} + +void* utp_set_userdata(utp_socket *socket, void *userdata) { + assert(socket); + if (socket) socket->userdata = userdata; + return socket ? socket->userdata : NULL; +} + +void* utp_get_userdata(utp_socket *socket) { + assert(socket); + return socket ? socket->userdata : NULL; +} + +void struct_utp_context::log(int level, utp_socket *socket, char const *fmt, ...) +{ + switch (level) { + case UTP_LOG_NORMAL: if (!log_normal) return; + case UTP_LOG_MTU: if (!log_mtu) return; + case UTP_LOG_DEBUG: if (!log_debug) return; + } + + va_list va; + char buf[4096]; + + va_start(va, fmt); + vsnprintf(buf, 4096, fmt, va); + buf[4095] = '\0'; + va_end(va); + + utp_call_log(this, socket, (const byte *)buf); +} + +utp_socket_stats* utp_get_stats(utp_socket *socket) +{ + #ifdef _DEBUG + assert(socket); + if (!socket) return NULL; + socket->_stats.mtu_guess = socket->mtu_last ? socket->mtu_last : socket->mtu_ceiling; + return &socket->_stats; + #else + return NULL; + #endif +} diff --git a/libs/libks/src/utp/utp_internal.h b/libs/libks/src/utp/utp_internal.h new file mode 100644 index 0000000000..8d6bf85c46 --- /dev/null +++ b/libs/libks/src/utp/utp_internal.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef __UTP_INTERNAL_H__ +#define __UTP_INTERNAL_H__ + +#include +#include +#include +#include + +#include "utp.h" +#include "utp_callbacks.h" +#include "utp_templates.h" +#include "utp_hash.h" +#include "utp_hash.h" +#include "utp_packedsockaddr.h" + +/* These originally lived in utp_config.h */ +#define CCONTROL_TARGET (100 * 1000) // us + +enum bandwidth_type_t { + payload_bandwidth, connect_overhead, + close_overhead, ack_overhead, + header_overhead, retransmit_overhead +}; + +#ifdef WIN32 + #ifdef _MSC_VER + #include "libutp_inet_ntop.h" + #endif + + // newer versions of MSVC define these in errno.h + #ifndef ECONNRESET + #define ECONNRESET WSAECONNRESET + #define EMSGSIZE WSAEMSGSIZE + #define ECONNREFUSED WSAECONNREFUSED + #define ETIMEDOUT WSAETIMEDOUT + #endif +#endif + +struct PACKED_ATTRIBUTE RST_Info { + PackedSockAddr addr; + uint32 connid; + uint16 ack_nr; + uint64 timestamp; +}; + +// It's really important that we don't have duplicate keys in the hash table. +// If we do, we'll eventually crash. if we try to remove the second instance +// of the key, we'll accidentally remove the first instead. then later, +// checkTimeouts will try to access the second one's already freed memory. +void UTP_FreeAll(struct UTPSocketHT *utp_sockets); + +struct UTPSocketKey { + PackedSockAddr addr; + uint32 recv_id; // "conn_seed", "conn_id" + + UTPSocketKey(const PackedSockAddr& _addr, uint32 _recv_id) { + memset(this, 0, sizeof(*this)); + addr = _addr; + recv_id = _recv_id; + } + + bool operator == (const UTPSocketKey &other) const { + return recv_id == other.recv_id && addr == other.addr; + } + + uint32 compute_hash() const { + return recv_id ^ addr.compute_hash(); + } +}; + +struct UTPSocketKeyData { + UTPSocketKey key; + UTPSocket *socket; + utp_link_t link; +}; + +#define UTP_SOCKET_BUCKETS 79 +#define UTP_SOCKET_INIT 15 + +struct UTPSocketHT : utpHashTable { + UTPSocketHT() { + const int buckets = UTP_SOCKET_BUCKETS; + const int initial = UTP_SOCKET_INIT; + this->Create(buckets, initial); + } + ~UTPSocketHT() { + UTP_FreeAll(this); + this->Free(); + } +}; + +struct struct_utp_context { + void *userdata; + utp_callback_t* callbacks[UTP_ARRAY_SIZE]; + + uint64 current_ms; + utp_context_stats context_stats; + UTPSocket *last_utp_socket; + Array ack_sockets; + Array rst_info; + UTPSocketHT *utp_sockets; + size_t target_delay; + size_t opt_sndbuf; + size_t opt_rcvbuf; + uint64 last_check; + + struct_utp_context(); + ~struct_utp_context(); + + void log(int level, utp_socket *socket, char const *fmt, ...); + + bool log_normal:1; // log normal events? + bool log_mtu:1; // log MTU related events? + bool log_debug:1; // log debugging events? (Must also compile with UTP_DEBUG_LOGGING defined) +}; + +#endif //__UTP_INTERNAL_H__ diff --git a/libs/libks/src/utp/utp_packedsockaddr.cpp b/libs/libks/src/utp/utp_packedsockaddr.cpp new file mode 100644 index 0000000000..b31b2fe33c --- /dev/null +++ b/libs/libks/src/utp/utp_packedsockaddr.cpp @@ -0,0 +1,139 @@ +// vim:set ts=4 sw=4 ai: + +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include + +#include "utp_types.h" +#include "utp_hash.h" +#include "utp_packedsockaddr.h" + +//#include "libutp_inet_ntop.h" + +byte PackedSockAddr::get_family() const +{ + #if defined(__sh__) + return ((_sin6d[0] == 0) && (_sin6d[1] == 0) && (_sin6d[2] == htonl(0xffff)) != 0) ? + AF_INET : AF_INET6; + #else + return (IN6_IS_ADDR_V4MAPPED(&_in._in6addr) != 0) ? AF_INET : AF_INET6; + #endif // defined(__sh__) +} + +bool PackedSockAddr::operator==(const PackedSockAddr& rhs) const +{ + if (&rhs == this) + return true; + if (_port != rhs._port) + return false; + return memcmp(_sin6, rhs._sin6, sizeof(_sin6)) == 0; +} + +bool PackedSockAddr::operator!=(const PackedSockAddr& rhs) const +{ + return !(*this == rhs); +} + +uint32 PackedSockAddr::compute_hash() const { + return utp_hash_mem(&_in, sizeof(_in)) ^ _port; +} + +void PackedSockAddr::set(const SOCKADDR_STORAGE* sa, socklen_t len) +{ + if (sa->ss_family == AF_INET) { + assert(len >= sizeof(sockaddr_in)); + const sockaddr_in *sin = (sockaddr_in*)sa; + _sin6w[0] = 0; + _sin6w[1] = 0; + _sin6w[2] = 0; + _sin6w[3] = 0; + _sin6w[4] = 0; + _sin6w[5] = 0xffff; + _sin4 = sin->sin_addr.s_addr; + _port = ntohs(sin->sin_port); + } else { + assert(len >= sizeof(sockaddr_in6)); + const sockaddr_in6 *sin6 = (sockaddr_in6*)sa; + _in._in6addr = sin6->sin6_addr; + _port = ntohs(sin6->sin6_port); + } +} + +PackedSockAddr::PackedSockAddr(const SOCKADDR_STORAGE* sa, socklen_t len) +{ + set(sa, len); +} + +PackedSockAddr::PackedSockAddr(void) +{ + SOCKADDR_STORAGE sa; + socklen_t len = sizeof(SOCKADDR_STORAGE); + memset(&sa, 0, len); + sa.ss_family = AF_INET; + set(&sa, len); +} + +SOCKADDR_STORAGE PackedSockAddr::get_sockaddr_storage(socklen_t *len = NULL) const +{ + SOCKADDR_STORAGE sa; + const byte family = get_family(); + if (family == AF_INET) { + sockaddr_in *sin = (sockaddr_in*)&sa; + if (len) *len = sizeof(sockaddr_in); + memset(sin, 0, sizeof(sockaddr_in)); + sin->sin_family = family; + sin->sin_port = htons(_port); + sin->sin_addr.s_addr = _sin4; + } else { + sockaddr_in6 *sin6 = (sockaddr_in6*)&sa; + memset(sin6, 0, sizeof(sockaddr_in6)); + if (len) *len = sizeof(sockaddr_in6); + sin6->sin6_family = family; + sin6->sin6_addr = _in._in6addr; + sin6->sin6_port = htons(_port); + } + return sa; +} + +// #define addrfmt(x, s) x.fmt(s, sizeof(s)) +cstr PackedSockAddr::fmt(str s, size_t len) const +{ + memset(s, 0, len); + const byte family = get_family(); 
+ str i; + if (family == AF_INET) { + inet_ntop(family, (uint32*)&_sin4, s, len); + i = s; + while (*++i) {} + } else { + i = s; + *i++ = '['; + inet_ntop(family, (in6_addr*)&_in._in6addr, i, len-1); + while (*++i) {} + *i++ = ']'; + } + snprintf(i, len - (i-s), ":%u", _port); + return s; +} diff --git a/libs/libks/src/utp/utp_packedsockaddr.h b/libs/libks/src/utp/utp_packedsockaddr.h new file mode 100644 index 0000000000..76e8accaa0 --- /dev/null +++ b/libs/libks/src/utp/utp_packedsockaddr.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef __UTP_PACKEDSOCKADDR_H__ +#define __UTP_PACKEDSOCKADDR_H__ + +#include "utp_types.h" + +struct PACKED_ATTRIBUTE PackedSockAddr { + // The values are always stored here in network byte order + union { + byte _in6[16]; // IPv6 + uint16 _in6w[8]; // IPv6, word based (for convenience) + uint32 _in6d[4]; // Dword access + in6_addr _in6addr; // For convenience + } _in; + + // Host byte order + uint16 _port; + + #define _sin4 _in._in6d[3] // IPv4 is stored where it goes if mapped + + #define _sin6 _in._in6 + #define _sin6w _in._in6w + #define _sin6d _in._in6d + + byte get_family() const; + bool operator==(const PackedSockAddr& rhs) const; + bool operator!=(const PackedSockAddr& rhs) const; + void set(const SOCKADDR_STORAGE* sa, socklen_t len); + + PackedSockAddr(const SOCKADDR_STORAGE* sa, socklen_t len); + PackedSockAddr(void); + + SOCKADDR_STORAGE get_sockaddr_storage(socklen_t *len) const; + cstr fmt(str s, size_t len) const; + + uint32 compute_hash() const; +} ALIGNED_ATTRIBUTE(4); + +#endif //__UTP_PACKEDSOCKADDR_H__ diff --git a/libs/libks/src/utp/utp_templates.h b/libs/libks/src/utp/utp_templates.h new file mode 100644 index 0000000000..8f88f5c7c0 --- /dev/null +++ b/libs/libks/src/utp/utp_templates.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef __TEMPLATES_H__ +#define __TEMPLATES_H__ + +#include "utp_types.h" +#include + +#if defined(POSIX) +/* Allow over-writing FORCEINLINE from makefile because gcc 3.4.4 for buffalo + doesn't seem to support __attribute__((always_inline)) in -O0 build + (strangely, it works in -Os build) */ +#ifndef FORCEINLINE +// The always_inline attribute asks gcc to inline the function even if no optimization is being requested. +// This macro should be used exclusive-or with the inline directive (use one or the other but not both) +// since Microsoft uses __forceinline to also mean inline, +// and this code is following a Microsoft compatibility model. +// Just setting the attribute without also specifying the inline directive apparently won't inline the function, +// as evidenced by multiply-defined symbols found at link time. 
+#define FORCEINLINE inline __attribute__((always_inline))
+#endif
+#endif
+
+// Utility templates
+#undef min
+#undef max
+
+template <typename T> static inline T min(T a, T b) { if (a < b) return a; return b; }
+template <typename T> static inline T max(T a, T b) { if (a > b) return a; return b; }
+
+template <typename T> static inline T min(T a, T b, T c) { return min(min(a,b),c); }
+template <typename T> static inline T max(T a, T b, T c) { return max(max(a,b),c); }
+template <typename T> static inline T clamp(T v, T mi, T ma)
+{
+	if (v > ma) v = ma;
+	if (v < mi) v = mi;
+	return v;
+}
+
+#if (defined(__SVR4) && defined(__sun))
+	#pragma pack(1)
+#else
+	#pragma pack(push,1)
+#endif
+
+
+namespace aux
+{
+	FORCEINLINE uint16 host_to_network(uint16 i) { return htons(i); }
+	FORCEINLINE uint32 host_to_network(uint32 i) { return htonl(i); }
+	FORCEINLINE int32 host_to_network(int32 i) { return htonl(i); }
+	FORCEINLINE uint16 network_to_host(uint16 i) { return ntohs(i); }
+	FORCEINLINE uint32 network_to_host(uint32 i) { return ntohl(i); }
+	FORCEINLINE int32 network_to_host(int32 i) { return ntohl(i); }
+}
+
+template <typename T>
+struct PACKED_ATTRIBUTE big_endian
+{
+	T operator=(T i) { m_integer = aux::host_to_network(i); return i; }
+	operator T() const { return aux::network_to_host(m_integer); }
+private:
+	T m_integer;
+};
+
+typedef big_endian<int32> int32_big;
+typedef big_endian<uint32> uint32_big;
+typedef big_endian<uint16> uint16_big;
+
+#if (defined(__SVR4) && defined(__sun))
+	#pragma pack(0)
+#else
+	#pragma pack(pop)
+#endif
+
+template <typename T> static inline void zeromem(T *a, size_t count = 1) { memset(a, 0, count * sizeof(T)); }
+
+typedef int SortCompareProc(const void *, const void *);
+
+template <typename T> static FORCEINLINE void QuickSortT(T *base, size_t num, int (*comp)(const T *, const T *)) { qsort(base, num, sizeof(T), (SortCompareProc*)comp); }
+
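The big_endian<T> wrapper above is what lets packed wire structures declare fields that live in network byte order while being read and written in host order: operator= converts on store, and operator T() converts on load. A minimal sketch of the intended use, not part of the patch (ExampleHeader and its fields are invented for illustration):

	#include <stdio.h>

	struct PACKED_ATTRIBUTE ExampleHeader {
		uint16_big conn_id;    // held big-endian in memory
		uint32_big timestamp;  // ditto
	};

	static void example_usage(void)
	{
		ExampleHeader h;
		h.conn_id = 0x1234;        // operator= stores htons(0x1234)
		h.timestamp = 0xAABBCCDD;  // operator= stores htonl(0xAABBCCDD)

		uint16 id = h.conn_id;     // operator T() converts back to host order
		printf("conn_id=%04x, first byte in memory=%02x\n",
		       id, ((const unsigned char *)&h)[0]); // prints 0x12 on any host
	}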
+// WARNING: The template parameter MUST be a POD type!
+template <typename T, size_t minsize = 16> class Array {
+protected:
+	T *mem;
+	size_t alloc,count;
+
+public:
+	Array(size_t init) { Init(init); }
+	Array() { Init(); }
+	~Array() { Free(); }
+
+	void inline Init() { mem = NULL; alloc = count = 0; }
+	void inline Init(size_t init) { Init(); if (init) Resize(init); }
+	size_t inline GetCount() const { return count; }
+	size_t inline GetAlloc() const { return alloc; }
+	void inline SetCount(size_t c) { count = c; }
+
+	inline T& operator[](size_t offset) { assert(offset == 0 || offset < alloc); return mem[offset]; }
+	inline const T& operator[](size_t offset) const { assert(offset == 0 || offset < alloc); return mem[offset]; }
+
+	void inline Resize(size_t a) {
+		if (a == 0) { free(mem); Init(); }
+		else { mem = (T*)realloc(mem, (alloc = a) * sizeof(T)); }
+	}
+
+	void inline Grow() { Resize(max<size_t>(minsize, alloc * 2)); }
+
+	inline size_t Append(const T &t) {
+		if (count >= alloc) Grow();
+		size_t r=count++;
+		mem[r] = t;
+		return r;
+	}
+
+	T inline &Append() {
+		if (count >= alloc) Grow();
+		return mem[count++];
+	}
+
+	void inline Compact() {
+		Resize(count);
+	}
+
+	void inline Free() {
+		free(mem);
+		Init();
+	}
+
+	void inline Clear() {
+		count = 0;
+	}
+
+	bool inline MoveUpLast(size_t index) {
+		assert(index < count);
+		size_t c = --count;
+		if (index != c) {
+			mem[index] = mem[c];
+			return true;
+		}
+		return false;
+	}
+
+	bool inline MoveUpLastExist(const T &v) {
+		return MoveUpLast(LookupElementExist(v));
+	}
+
+	size_t inline LookupElement(const T &v) const {
+		for(size_t i = 0; i != count; i++)
+			if (mem[i] == v)
+				return i;
+		return (size_t) -1;
+	}
+
+	bool inline HasElement(const T &v) const {
+		return LookupElement(v) != -1;
+	}
+
+	typedef int SortCompareProc(const T *a, const T *b);
+
+	void Sort(SortCompareProc* proc, size_t start, size_t end) {
+		QuickSortT(&mem[start], end - start, proc);
+	}
+
+	void Sort(SortCompareProc* proc, size_t start) {
+		Sort(proc, start, count);
+	}
+
+	void Sort(SortCompareProc* proc) {
+		Sort(proc, 0, count);
+	}
+};
+
+#endif //__TEMPLATES_H__
diff --git a/libs/libks/src/utp/utp_types.h b/libs/libks/src/utp/utp_types.h
new file mode 100644
index 0000000000..0b953ee3e2
--- /dev/null
+++ b/libs/libks/src/utp/utp_types.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2010-2013 BitTorrent, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */ + +#ifndef __UTP_TYPES_H__ +#define __UTP_TYPES_H__ + +#ifdef __GNUC__ + // Used for gcc tool chains accepting but not supporting pragma pack + // See http://gcc.gnu.org/onlinedocs/gcc/Type-Attributes.html + #define PACKED_ATTRIBUTE __attribute__((__packed__)) +#else + #define PACKED_ATTRIBUTE +#endif + +#ifdef __GNUC__ + #define ALIGNED_ATTRIBUTE(x) __attribute__((aligned (x))) +#else + #define ALIGNED_ATTRIBUTE(x) +#endif + +// hash.cpp needs socket definitions, which is why this networking specific +// code is inclued in utypes.h +#ifdef WIN32 + #define _CRT_SECURE_NO_DEPRECATE + #define WIN32_LEAN_AND_MEAN + #include + #include + #include + #define IP_OPT_DONTFRAG IP_DONTFRAGMENT +#else + #include + #include + #include + #include + + #ifdef IP_DONTFRAG + #define IP_OPT_DONTFRAG IP_DONTFRAG + #elif defined IP_DONTFRAGMENT + #define IP_OPT_DONTFRAG IP_DONTFRAGMENT + #else + //#warning "I don't know how to set DF bit on this system" + #endif +#endif + +#ifdef _MSC_VER + #include + typedef SSIZE_T ssize_t; +#endif + +#ifdef POSIX + typedef struct sockaddr_storage SOCKADDR_STORAGE; +#endif + +#ifdef WIN32 + #define I64u "%I64u" +#else + #define I64u "%Lu" +#endif + +#ifdef WIN32 + #define snprintf _snprintf +#endif + +// standard types +typedef unsigned char byte; +typedef unsigned char uint8; +typedef signed char int8; +typedef unsigned short uint16; +typedef signed short int16; +typedef unsigned int uint; +typedef unsigned int uint32; +typedef signed int int32; + +#ifdef _MSC_VER +typedef unsigned __int64 uint64; +typedef signed __int64 int64; +#else +typedef unsigned long long uint64; +typedef long long int64; +#endif + +/* compile-time assert */ +#ifndef CASSERT +#define CASSERT( exp, name ) typedef int is_not_##name [ (exp ) ? 1 : -1 ]; +#endif + +CASSERT(8 == sizeof(uint64), sizeof_uint64_is_8) +CASSERT(8 == sizeof(int64), sizeof_int64_is_8) + +#ifndef INT64_MAX +#define INT64_MAX 0x7fffffffffffffffLL +#endif + +// always ANSI +typedef const char * cstr; +typedef char * str; + +#ifndef __cplusplus +typedef uint8 bool; +#endif + +#endif //__UTP_TYPES_H__ diff --git a/libs/libks/src/utp/utp_utils.cpp b/libs/libks/src/utp/utp_utils.cpp new file mode 100644 index 0000000000..f2c57abe4f --- /dev/null +++ b/libs/libks/src/utp/utp_utils.cpp @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
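One detail of utp_types.h above worth spelling out: CASSERT is a C89-compatible compile-time assertion. It declares a typedef of an int array whose size evaluates to 1 when the condition holds and to -1 (a hard compile error) when it does not, which is how the two CASSERT lines pin the 64-bit typedefs to exactly 8 bytes. A small sketch, not part of the patch:

	#include "utp_types.h"

	// Passes: the typedef is legal because the array size evaluates to 1.
	CASSERT(4 == sizeof(uint32), sizeof_uint32_is_4)
	// expands to: typedef int is_not_sizeof_uint32_is_4[(4 == sizeof(uint32)) ? 1 : -1];

	// Would fail to compile: an array of size -1 is rejected.
	// CASSERT(2 == sizeof(uint32), uint32_is_two_bytes)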
+ */ + +#include +#include +#include "utp.h" +#include "utp_types.h" + +#ifdef WIN32 + #define WIN32_LEAN_AND_MEAN + #include + #include + #include +#else //!WIN32 + #include + #include // Linux needs both time.h and sys/time.h +#endif + +#if defined(__APPLE__) + #include +#endif + +#include "utp_utils.h" + +#ifdef WIN32 + +typedef ULONGLONG (WINAPI GetTickCount64Proc)(void); +static GetTickCount64Proc *pt2GetTickCount64; +static GetTickCount64Proc *pt2RealGetTickCount; + +static uint64 startPerformanceCounter; +static uint64 startGetTickCount; +// MSVC 6 standard doesn't like division with uint64s +static double counterPerMicrosecond; + +static uint64 UTGetTickCount64() +{ + if (pt2GetTickCount64) { + return pt2GetTickCount64(); + } + if (pt2RealGetTickCount) { + uint64 v = pt2RealGetTickCount(); + // fix return value from GetTickCount + return (DWORD)v | ((v >> 0x18) & 0xFFFFFFFF00000000); + } + return (uint64)GetTickCount(); +} + +static void Time_Initialize() +{ + HMODULE kernel32 = GetModuleHandleA("kernel32.dll"); + pt2GetTickCount64 = (GetTickCount64Proc*)GetProcAddress(kernel32, "GetTickCount64"); + // not a typo. GetTickCount actually returns 64 bits + pt2RealGetTickCount = (GetTickCount64Proc*)GetProcAddress(kernel32, "GetTickCount"); + + uint64 frequency; + QueryPerformanceCounter((LARGE_INTEGER*)&startPerformanceCounter); + QueryPerformanceFrequency((LARGE_INTEGER*)&frequency); + counterPerMicrosecond = (double)frequency / 1000000.0f; + startGetTickCount = UTGetTickCount64(); +} + +static int64 abs64(int64 x) { return x < 0 ? -x : x; } + +static uint64 __GetMicroseconds() +{ + static bool time_init = false; + if (!time_init) { + time_init = true; + Time_Initialize(); + } + + uint64 counter; + uint64 tick; + + QueryPerformanceCounter((LARGE_INTEGER*) &counter); + tick = UTGetTickCount64(); + + // unfortunately, QueryPerformanceCounter is not guaranteed + // to be monotonic. Make it so. + int64 ret = (int64)(((int64)counter - (int64)startPerformanceCounter) / counterPerMicrosecond); + // if the QPC clock leaps more than one second off GetTickCount64() + // something is seriously fishy. Adjust QPC to stay monotonic + int64 tick_diff = tick - startGetTickCount; + if (abs64(ret / 100000 - tick_diff / 100) > 10) { + startPerformanceCounter -= (uint64)((int64)(tick_diff * 1000 - ret) * counterPerMicrosecond); + ret = (int64)((counter - startPerformanceCounter) / counterPerMicrosecond); + } + return ret; +} + +static inline uint64 UTP_GetMilliseconds() +{ + return GetTickCount(); +} + +#else //!WIN32 + +static inline uint64 UTP_GetMicroseconds(void); +static inline uint64 UTP_GetMilliseconds() +{ + return UTP_GetMicroseconds() / 1000; +} + +#if defined(__APPLE__) + +static uint64 __GetMicroseconds() +{ + // http://developer.apple.com/mac/library/qa/qa2004/qa1398.html + // http://www.macresearch.org/tutorial_performance_and_time + static mach_timebase_info_data_t sTimebaseInfo; + static uint64_t start_tick = 0; + uint64_t tick; + // Returns a counter in some fraction of a nanoseconds + tick = mach_absolute_time(); + if (sTimebaseInfo.denom == 0) { + // Get the timer ratio to convert mach_absolute_time to nanoseconds + mach_timebase_info(&sTimebaseInfo); + start_tick = tick; + } + // Calculate the elapsed time, convert it to microseconds and return it. + return ((tick - start_tick) * sTimebaseInfo.numer) / (sTimebaseInfo.denom * 1000); +} + +#else // !__APPLE__ + +#if ! 
(defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0 && defined(CLOCK_MONOTONIC)) + #warning "Using non-monotonic function gettimeofday() in UTP_GetMicroseconds()" +#endif + +/* Unfortunately, #ifdef CLOCK_MONOTONIC is not enough to make sure that + POSIX clocks work -- we could be running a recent libc with an ancient + kernel (think OpenWRT). -- jch */ + +static uint64_t __GetMicroseconds() +{ + struct timeval tv; + + #if defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0 && defined(CLOCK_MONOTONIC) + static int have_posix_clocks = -1; + int rc; + + if (have_posix_clocks < 0) { + struct timespec ts; + rc = clock_gettime(CLOCK_MONOTONIC, &ts); + if (rc < 0) { + have_posix_clocks = 0; + } else { + have_posix_clocks = 1; + } + } + + if (have_posix_clocks) { + struct timespec ts; + rc = clock_gettime(CLOCK_MONOTONIC, &ts); + return uint64(ts.tv_sec) * 1000000 + uint64(ts.tv_nsec) / 1000; + } + #endif + + gettimeofday(&tv, NULL); + return uint64(tv.tv_sec) * 1000000 + tv.tv_usec; +} + +#endif //!__APPLE__ + +#endif //!WIN32 + +/* + * Whew. Okay. After that #ifdef maze above, we now know we have a working + * __GetMicroseconds() implementation on all platforms. + * + * Because there are a number of assertions in libutp that will cause a crash + * if monotonic time isn't monotonic, now apply some safety checks. While in + * principle we're already protecting ourselves in cases where non-monotonic + * time is likely to happen, this protects all versions. + */ + +static inline uint64 UTP_GetMicroseconds() +{ + static uint64 offset = 0, previous = 0; + + uint64 now = __GetMicroseconds() + offset; + if (previous > now) { + /* Eek! */ + offset += previous - now; + now = previous; + } + previous = now; + return now; +} + +#define ETHERNET_MTU 1500 +#define IPV4_HEADER_SIZE 20 +#define IPV6_HEADER_SIZE 40 +#define UDP_HEADER_SIZE 8 +#define GRE_HEADER_SIZE 24 +#define PPPOE_HEADER_SIZE 8 +#define MPPE_HEADER_SIZE 2 +// packets have been observed in the wild that were fragmented +// with a payload of 1416 for the first fragment +// There are reports of routers that have MTU sizes as small as 1392 +#define FUDGE_HEADER_SIZE 36 +#define TEREDO_MTU 1280 + +#define UDP_IPV4_OVERHEAD (IPV4_HEADER_SIZE + UDP_HEADER_SIZE) +#define UDP_IPV6_OVERHEAD (IPV6_HEADER_SIZE + UDP_HEADER_SIZE) +#define UDP_TEREDO_OVERHEAD (UDP_IPV4_OVERHEAD + UDP_IPV6_OVERHEAD) + +#define UDP_IPV4_MTU (ETHERNET_MTU - IPV4_HEADER_SIZE - UDP_HEADER_SIZE - GRE_HEADER_SIZE - PPPOE_HEADER_SIZE - MPPE_HEADER_SIZE - FUDGE_HEADER_SIZE) +#define UDP_IPV6_MTU (ETHERNET_MTU - IPV6_HEADER_SIZE - UDP_HEADER_SIZE - GRE_HEADER_SIZE - PPPOE_HEADER_SIZE - MPPE_HEADER_SIZE - FUDGE_HEADER_SIZE) +#define UDP_TEREDO_MTU (TEREDO_MTU - IPV6_HEADER_SIZE - UDP_HEADER_SIZE) + +uint64 utp_default_get_udp_mtu(utp_callback_arguments *args) { + // Since we don't know the local address of the interface, + // be conservative and assume all IPv6 connections are Teredo. + return (args->address->sa_family == AF_INET6) ? UDP_TEREDO_MTU : UDP_IPV4_MTU; +} + +uint64 utp_default_get_udp_overhead(utp_callback_arguments *args) { + // Since we don't know the local address of the interface, + // be conservative and assume all IPv6 connections are Teredo. + return (args->address->sa_family == AF_INET6) ? 
UDP_TEREDO_OVERHEAD : UDP_IPV4_OVERHEAD; +} + +uint64 utp_default_get_random(utp_callback_arguments *args) { + return rand(); +} + +uint64 utp_default_get_milliseconds(utp_callback_arguments *args) { + return UTP_GetMilliseconds(); +} + +uint64 utp_default_get_microseconds(utp_callback_arguments *args) { + return UTP_GetMicroseconds(); +} diff --git a/libs/libks/src/utp/utp_utils.h b/libs/libks/src/utp/utp_utils.h new file mode 100644 index 0000000000..7eb0c55621 --- /dev/null +++ b/libs/libks/src/utp/utp_utils.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2010-2013 BitTorrent, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +uint64 utp_default_get_udp_mtu(utp_callback_arguments *args); +uint64 utp_default_get_udp_overhead(utp_callback_arguments *args); +uint64 utp_default_get_random(utp_callback_arguments *args); +uint64 utp_default_get_milliseconds(utp_callback_arguments *args); +uint64 utp_default_get_microseconds(utp_callback_arguments *args); diff --git a/libs/libks/src/win/mman.c b/libs/libks/src/win/mman.c index b154317e98..d61571fbc3 100644 --- a/libs/libks/src/win/mman.c +++ b/libs/libks/src/win/mman.c @@ -11,170 +11,156 @@ static int __map_mman_error(const DWORD err, const int deferr) { - if (err == 0) - return 0; - //TODO: implement - return err; + if (err == 0) + return 0; + //TODO: implement + return err; } static DWORD __map_mmap_prot_page(const int prot) { - DWORD protect = 0; - - if (prot == PROT_NONE) - return protect; - - if ((prot & PROT_EXEC) != 0) - { - protect = ((prot & PROT_WRITE) != 0) ? - PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; - } - else - { - protect = ((prot & PROT_WRITE) != 0) ? - PAGE_READWRITE : PAGE_READONLY; - } - - return protect; + DWORD protect = 0; + + if (prot == PROT_NONE) + return protect; + + if ((prot & PROT_EXEC) != 0) { + protect = ((prot & PROT_WRITE) != 0) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; + } else { + protect = ((prot & PROT_WRITE) != 0) ? 
PAGE_READWRITE : PAGE_READONLY; + } + + return protect; } static DWORD __map_mmap_prot_file(const int prot) { - DWORD desiredAccess = 0; - - if (prot == PROT_NONE) - return desiredAccess; - - if ((prot & PROT_READ) != 0) - desiredAccess |= FILE_MAP_READ; - if ((prot & PROT_WRITE) != 0) - desiredAccess |= FILE_MAP_WRITE; - if ((prot & PROT_EXEC) != 0) - desiredAccess |= FILE_MAP_EXECUTE; - - return desiredAccess; + DWORD desiredAccess = 0; + + if (prot == PROT_NONE) + return desiredAccess; + + if ((prot & PROT_READ) != 0) + desiredAccess |= FILE_MAP_READ; + if ((prot & PROT_WRITE) != 0) + desiredAccess |= FILE_MAP_WRITE; + if ((prot & PROT_EXEC) != 0) + desiredAccess |= FILE_MAP_EXECUTE; + + return desiredAccess; } -void* mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) +void *mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) { - HANDLE fm, h; - - void * map = MAP_FAILED; - + HANDLE fm, h; + + void *map = MAP_FAILED; + #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4293) #endif - const DWORD dwFileOffsetLow = (sizeof(off_t) <= sizeof(DWORD)) ? - (DWORD)off : (DWORD)(off & 0xFFFFFFFFL); - const DWORD dwFileOffsetHigh = (sizeof(off_t) <= sizeof(DWORD)) ? - (DWORD)0 : (DWORD)((off >> 32) & 0xFFFFFFFFL); - const DWORD protect = __map_mmap_prot_page(prot); - const DWORD desiredAccess = __map_mmap_prot_file(prot); + const DWORD dwFileOffsetLow = (sizeof(off_t) <= sizeof(DWORD)) ? (DWORD) off : (DWORD) (off & 0xFFFFFFFFL); + const DWORD dwFileOffsetHigh = (sizeof(off_t) <= sizeof(DWORD)) ? (DWORD) 0 : (DWORD) ((off >> 32) & 0xFFFFFFFFL); + const DWORD protect = __map_mmap_prot_page(prot); + const DWORD desiredAccess = __map_mmap_prot_file(prot); - const off_t maxSize = off + (off_t)len; + const off_t maxSize = off + (off_t) len; - const DWORD dwMaxSizeLow = (sizeof(off_t) <= sizeof(DWORD)) ? - (DWORD)maxSize : (DWORD)(maxSize & 0xFFFFFFFFL); - const DWORD dwMaxSizeHigh = (sizeof(off_t) <= sizeof(DWORD)) ? - (DWORD)0 : (DWORD)((maxSize >> 32) & 0xFFFFFFFFL); + const DWORD dwMaxSizeLow = (sizeof(off_t) <= sizeof(DWORD)) ? (DWORD) maxSize : (DWORD) (maxSize & 0xFFFFFFFFL); + const DWORD dwMaxSizeHigh = (sizeof(off_t) <= sizeof(DWORD)) ? (DWORD) 0 : (DWORD) ((maxSize >> 32) & 0xFFFFFFFFL); #ifdef _MSC_VER #pragma warning(pop) #endif - errno = 0; - - if (len == 0 - /* Unsupported flag combinations */ - || (flags & MAP_FIXED) != 0 - /* Usupported protection combinations */ - || prot == PROT_EXEC) - { - errno = EINVAL; - return MAP_FAILED; - } - - h = ((flags & MAP_ANONYMOUS) == 0) ? - (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE; + errno = 0; - if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) - { - errno = EBADF; - return MAP_FAILED; - } + if (len == 0 + /* Unsupported flag combinations */ + || (flags & MAP_FIXED) != 0 + /* Usupported protection combinations */ + || prot == PROT_EXEC) { + errno = EINVAL; + return MAP_FAILED; + } - fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL); + h = ((flags & MAP_ANONYMOUS) == 0) ? 
(HANDLE) _get_osfhandle(fildes) : INVALID_HANDLE_VALUE; - if (fm == NULL) - { - errno = __map_mman_error(GetLastError(), EPERM); - return MAP_FAILED; - } - - map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len); + if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) { + errno = EBADF; + return MAP_FAILED; + } - CloseHandle(fm); - - if (map == NULL) - { - errno = __map_mman_error(GetLastError(), EPERM); - return MAP_FAILED; - } + fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL); - return map; + if (fm == NULL) { + errno = __map_mman_error(GetLastError(), EPERM); + return MAP_FAILED; + } + + map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len); + + CloseHandle(fm); + + if (map == NULL) { + errno = __map_mman_error(GetLastError(), EPERM); + return MAP_FAILED; + } + + return map; } int munmap(void *addr, size_t len) { - if (UnmapViewOfFile(addr)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; + if (UnmapViewOfFile(addr)) + return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; } int _mprotect(void *addr, size_t len, int prot) { - DWORD newProtect = __map_mmap_prot_page(prot); - DWORD oldProtect = 0; - - if (VirtualProtect(addr, len, newProtect, &oldProtect)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; + DWORD newProtect = __map_mmap_prot_page(prot); + DWORD oldProtect = 0; + + if (VirtualProtect(addr, len, newProtect, &oldProtect)) + return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; } int msync(void *addr, size_t len, int flags) { - if (FlushViewOfFile(addr, len)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; + if (FlushViewOfFile(addr, len)) + return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; } int mlock(const void *addr, size_t len) { - if (VirtualLock((LPVOID)addr, len)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; + if (VirtualLock((LPVOID) addr, len)) + return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; } int munlock(const void *addr, size_t len) { - if (VirtualUnlock((LPVOID)addr, len)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; + if (VirtualUnlock((LPVOID) addr, len)) + return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; } diff --git a/libs/libks/src/win/sys/mman.h b/libs/libks/src/win/sys/mman.h index 7c1197b819..e003da8260 100644 --- a/libs/libks/src/win/sys/mman.h +++ b/libs/libks/src/win/sys/mman.h @@ -6,9 +6,9 @@ #ifndef _SYS_MMAN_H_ #define _SYS_MMAN_H_ -#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later. -#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows. -#endif +#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later. +#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows. +#endif /* All the headers include this file. 
*/ #ifndef _MSC_VER @@ -41,12 +41,12 @@ extern "C" { #define MS_SYNC 2 #define MS_INVALIDATE 4 -void* mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off); -int munmap(void *addr, size_t len); -int _mprotect(void *addr, size_t len, int prot); -int msync(void *addr, size_t len, int flags); -int mlock(const void *addr, size_t len); -int munlock(const void *addr, size_t len); + void *mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off); + int munmap(void *addr, size_t len); + int _mprotect(void *addr, size_t len, int prot); + int msync(void *addr, size_t len, int flags); + int mlock(const void *addr, size_t len); + int munlock(const void *addr, size_t len); #ifdef __cplusplus }; diff --git a/libs/libks/test/.gitignore b/libs/libks/test/.gitignore new file mode 100644 index 0000000000..a17940c85b --- /dev/null +++ b/libs/libks/test/.gitignore @@ -0,0 +1,16 @@ +*.log +*.trs +table_file +testpools +testtable +testthreadmutex +testtime +testhash +testq +testsock +testsock2 +testwebsock +testdht +testdht_net +testdht_msg +dht_example diff --git a/libs/libks/test/Makefile.am b/libs/libks/test/Makefile.am new file mode 100644 index 0000000000..c7d3d8fd55 --- /dev/null +++ b/libs/libks/test/Makefile.am @@ -0,0 +1,81 @@ +AM_CFLAGS += -I$(abs_top_srcdir)/src/include -g -ggdb -O0 $(openssl_CFLAGS) +TEST_LDADD = $(abs_top_builddir)/libks.la $(openssl_LIBS) +check_PROGRAMS = + +EXTRA_DIST = tap.h + +check_PROGRAMS += testpools +testpools_SOURCES = testpools.c tap.c +testpools_CFLAGS = $(AM_CFLAGS) +testpools_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testthreadmutex +testthreadmutex_SOURCES = testthreadmutex.c tap.c +testthreadmutex_CFLAGS = $(AM_CFLAGS) +testthreadmutex_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testtime +testtime_SOURCES = testtime.c tap.c +testtime_CFLAGS = $(AM_CFLAGS) +testtime_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testq +testq_SOURCES = testq.c tap.c +testq_CFLAGS = $(AM_CFLAGS) +testq_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testhash +testhash_SOURCES = testhash.c tap.c +testhash_CFLAGS = $(AM_CFLAGS) +testhash_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testsock +testsock_SOURCES = testsock.c tap.c +testsock_CFLAGS = $(AM_CFLAGS) +testsock_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testsock2 +testsock2_SOURCES = testsock2.c tap.c +testsock2_CFLAGS = $(AM_CFLAGS) +testsock2_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testwebsock +testwebsock_SOURCES = testwebsock.c tap.c +testwebsock_CFLAGS = $(AM_CFLAGS) +testwebsock_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testdht +testdht_SOURCES = testdht.c tap.c +testdht_CFLAGS = $(AM_CFLAGS) +testdht_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testdht_net +testdht_net_SOURCES = testdht-net.c tap.c +testdht_net_CFLAGS = $(AM_CFLAGS) +testdht_net_LDADD = $(TEST_LDADD) + +check_PROGRAMS += testdht_msg +testdht_msg_SOURCES = testdht-msg.c tap.c +testdht_msg_CFLAGS = $(AM_CFLAGS) +testdht_msg_LDADD = $(TEST_LDADD) + +check_PROGRAMS += dht_example +dht_example_SOURCES = dht-example.c +dht_example_CFLAGS = $(AM_CFLAGS) +dht_example_LDADD = $(abs_top_builddir)/libks.la $(openssl_LIBS) -ledit -lpthread + +#check_PROGRAMS += libtorrent_example +#libtorrent_example_SOURCES = libtorrent-example.c +#libtorrent_example_CFLAGS = $(AM_CFLAGS) +#libtorrent_example_LDADD = $(abs_top_builddir)/libks.la $(abs_top_builddir)/test/libtorrent.so /usr/lib/x86_64-linux-gnu/libboost_system.a $(openssl_LIBS) -ledit -lpthread -ltorrent-rasterbar -lstdc++ + +TESTS=$(check_PROGRAMS) + +tests: $(check_PROGRAMS) + 
+$(abs_top_builddir)/test/libtorrent.so: $(abs_top_builddir)/test/libtorrent.o + g++ -shared -o $(abs_top_builddir)/test/libtorrent.so $(abs_top_builddir)/test/libtorrent.o + +$(abs_top_builddir)/test/libtorrent.o: $(abs_top_builddir)/test/libtorrent.cpp + g++ -c -fPIC -o $(abs_top_builddir)/test/libtorrent.o -I$(abs_top_builddir)/test/ $(abs_top_builddir)/test/libtorrent.cpp + diff --git a/libs/libks/test/dht-example.c b/libs/libks/test/dht-example.c new file mode 100644 index 0000000000..3b6f29a583 --- /dev/null +++ b/libs/libks/test/dht-example.c @@ -0,0 +1,399 @@ +/* This example code was written by Juliusz Chroboczek. + You are free to cut'n'paste from it to your heart's content. */ + +/* For crypt */ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ks.h" +#include "histedit.h" +#include "sodium.h" + +#define MAX_BOOTSTRAP_NODES 20 +static ks_sockaddr_t bootstrap_nodes[MAX_BOOTSTRAP_NODES]; +static ks_sockaddr_t bind_nodes[MAX_BOOTSTRAP_NODES]; +static int num_bootstrap_nodes = 0; +static int num_bind_nodes = 0; + +/* The call-back function is called by the DHT whenever something + interesting happens. Right now, it only happens when we get a new value or + when a search completes, but this may be extended in future versions. */ +static void callback(void *closure, ks_dht_event_t event, const unsigned char *info_hash, const void *data, size_t data_len) +{ + if(event == KS_DHT_EVENT_SEARCH_DONE) { + printf("Search done.\n"); + } else if(event == KS_DHT_EVENT_VALUES) { + const uint8_t *bits_8 = data; + const uint16_t *bits_16 = data; + + printf("Received %d values.\n", (int)(data_len / 6)); + printf("Recieved %u.%u.%u.%u:%u\n", bits_8[0], bits_8[1], bits_8[2], bits_8[3], ntohs(bits_16[2])); + } else { + printf("Unhandled event %d\n", event); + } +} + +void json_cb(struct dht_handle_s *h, const cJSON *msg, void *arg) +{ + char *pretty = cJSON_Print((cJSON *)msg); + + printf("Received json msg: %s\n", pretty); + + free(pretty); +} + +static char * prompt(EditLine *e) { + return "dht> "; +} + +static dht_handle_t *h; + + +typedef struct dht_globals_s { + int s; + int s6; + int port; + int exiting; +} dht_globals_t; + +void *dht_event_thread(ks_thread_t *thread, void *data) +{ + dht_globals_t *globals = data; + + while(!globals->exiting) { + ks_dht_one_loop(h, 0); + ks_sleep(1000000); + } + + return NULL; +} + +int +main(int argc, char **argv) +{ + dht_globals_t globals = {0}; + int i; + //int have_id = 0; + //char *id_file = "dht-example.id"; + int ipv4 = 0, ipv6 = 0; + int autobind = 0; + int opt; + EditLine *el; + History *myhistory; + int count; + const char *line; + HistEvent ev; + ks_status_t status; + static ks_thread_t *threads[1]; /* Main dht event thread */ + ks_pool_t *pool; + int err = 0; + unsigned char alice_publickey[crypto_box_PUBLICKEYBYTES] = {0}; + unsigned char alice_secretkey[crypto_box_SECRETKEYBYTES] = {0}; + + ks_init(); + + el = el_init("test", stdin, stdout, stderr); + el_set(el, EL_PROMPT, &prompt); + el_set(el, EL_EDITOR, "emacs"); + myhistory = history_init(); + history(myhistory, &ev, H_SETSIZE, 800); + el_set(el, EL_HIST, history, myhistory); + globals.port = 5309; + + + ks_global_set_default_logger(7); + + while(1) { + opt = getopt(argc, argv, "46ap:b:B:"); + if(opt < 0) + break; + + switch(opt) { + case '4': + ipv4 = 1; + break; + case '6': + ipv6 = 1; + break; + case 'a': + autobind = 1; + break; + case 'p': + globals.port = atoi(optarg); + break; + 
case 'b': + case 'B': { + char ip[80]; + int port = globals.port; + char *p; + ks_set_string(ip, optarg); + + if ((p = strchr(ip, '+'))) { + *p++ = '\0'; + port = atoi(p); + } + if (opt == 'B') { + printf("Adding bootstrap node %s:%d\n", ip, port); + ks_addr_set(&bootstrap_nodes[num_bootstrap_nodes++], ip, port, 0); + } else { + printf("Adding binding %s:%d\n", ip, port); + ks_addr_set(&bind_nodes[num_bind_nodes++], ip, port, 0); + } + } + break; + default: + goto usage; + } + } + + if(argc < 2) + goto usage; + + i = optind; + + if(globals.port <= 0 || globals.port >= 0x10000) + goto usage; + + + ks_dht_af_flag_t af_flags = 0; + + if (ipv4) { + af_flags |= KS_DHT_AF_INET4; + } + + if (ipv6) { + af_flags |= KS_DHT_AF_INET6; + } + + /* Init the dht. */ + status = ks_dht_init(&h, af_flags, NULL, globals.port); + + if(status != KS_STATUS_SUCCESS) { + perror("dht_init"); + exit(1); + } + + for(i = 0; i < num_bind_nodes; i++) { + ks_dht_add_ip(h, bind_nodes[i].host, bind_nodes[i].port); + } + + if (autobind) { + ks_dht_set_param(h, DHT_PARAM_AUTOROUTE, KS_TRUE); + } + + ks_dht_start(h); + + ks_dht_set_callback(h, callback, NULL); + + ks_pool_open(&pool); + status = ks_thread_create_ex(&threads[0], dht_event_thread, &globals, KS_THREAD_FLAG_DETATCHED, KS_THREAD_DEFAULT_STACK, KS_PRI_NORMAL, pool); + + if ( status != KS_STATUS_SUCCESS) { + printf("Failed to start DHT event thread\n"); + exit(1); + } + + /* For bootstrapping, we need an initial list of nodes. This could be + hard-wired, but can also be obtained from the nodes key of a torrent + file, or from the PORT bittorrent message. + + Dht_ping_node is the brutal way of bootstrapping -- it actually + sends a message to the peer. If you're going to bootstrap from + a massive number of nodes (for example because you're restoring from + a dump) and you already know their ids, it's better to use + dht_insert_node. If the ids are incorrect, the DHT will recover. 
*/ + for(i = 0; i < num_bootstrap_nodes; i++) { + dht_ping_node(h, &bootstrap_nodes[i]); + usleep(random() % 100000); + } + + printf("TESTING!!!\n"); + err = crypto_sign_keypair(alice_publickey, alice_secretkey); + printf("Result of generating keypair %d\n", err); + + ks_dht_store_entry_json_cb_set(h, json_cb, NULL); + + while ( !globals.exiting ) { + line = el_gets(el, &count); + + if (count > 1) { + int line_len = (int)strlen(line) - 1; + char *cmd_dup = strdup(line); + char *argv[8] = { 0 }; + int argc = 0; + + history(myhistory, &ev, H_ENTER, line); + + if ( cmd_dup[line_len] == '\n' ) { + cmd_dup[line_len] = '\0'; + } + argc = ks_separate_string(cmd_dup, " ", argv, (sizeof(argv) / sizeof(argv[0]))); + + if (!strncmp(line, "quit", 4)) { + globals.exiting = 1; + } else if (!strncmp(line, "show_bind", 9)) { + const ks_sockaddr_t **bindings; + ks_size_t len = 0; + int i; + + ks_dht_get_bind_addrs(h, &bindings, &len); + + for (i = 0; i < len; i++) { + printf("Bind addr %s:%d\n", bindings[i]->host, bindings[i]->port); + } + + } else if (!strncmp(line, "ping ", 5)) { + const char *ip = line + 5; + ks_sockaddr_t tmp; + char *p; + + while ((p = strchr(ip, '\r')) || (p = strchr(ip, '\n'))) { + *p = '\0'; + } + + ks_addr_set(&tmp, ip, globals.port, 0); + dht_ping_node(h, &tmp); + } else if (!strncmp(line, "find_node ", 9)) { + /* usage: find_node ipv[4|6] [40 character node id] [40 character target id] */ + ks_bool_t ipv6 = strncmp(argv[1], "ipv4", 4); + (void) argc; /* Check to see if it's the right length, else print usage */ + ks_dht_api_find_node(h, argv[2], argv[3], ipv6); + } else if (!strncmp(line, "loglevel", 8)) { + ks_global_set_default_logger(atoi(line + 9)); + } else if (!strncmp(line, "peer_dump", 9)) { + dht_dump_tables(h, stdout); + } else if (!strncmp(line, "generate_identity", 17)) { + /* usage: generate_identity [identity key: first_id] */ + /* requires an arg, checks identity hash for arg value. + + if found, return already exists. + if not found, generate sodium public and private keys, and insert into identities hash. + */ + } else if (!strncmp(line, "print_identity_key", 18)) { + /* usage: print_identity_key [identity key] */ + } else if (!strncmp(line, "message_mutable", 15)) { + char *input = strdup(line); + char *message_id = input + 16; + char *message = NULL; + cJSON *output = NULL; + int idx = 17; /* this should be the start of the message_id */ + for ( idx = 17; idx < 100 && input[idx] != '\0'; idx++ ) { + if ( input[idx] == ' ' ) { + input[idx] = '\0'; + message = input + 1 + idx; + break; + } + } + + /* Hack for my testing, so that it chomps the new line. Makes debugging print nicer. */ + for ( idx++; input[idx] != '\0'; idx++) { + if ( input[idx] == '\n' ) { + input[idx] = '\0'; + } + } + /* usage: message_mutable [identity key] [message id: asdf] [your message: Hello from DHT example]*/ + /* + takes an identity, a message id(salt) and a message, then sends out the announcement. + */ + output = cJSON_CreateString(message); + + ks_dht_send_message_mutable_cjson(h, alice_secretkey, alice_publickey, + NULL, message_id, 1, output, 600); + free(input); + cJSON_Delete(output); + } else if (!strncmp(line, "message_immutable", 15)) { + /* usage: message_immutable [identity key] */ + /* + takes an identity, and a message, then sends out the announcement. 
+ */ + } else if (!strncmp(line, "message_get", 11)) { + /* usage: message_get [40 character sha1 digest b64 encoded]*/ + + /* MUST RETURN BENCODE OBJECT */ + } else if (!strncmp(line, "message_get_mine", 16)) { + /* usage: message_get [identity key] [message id: asdf]*/ + /* This looks up the message token from identity key and the message id(aka message salt) */ + + /* MUST RETURN BENCODE OBJECT */ + } else if (!strncmp(line, "add_buddy", 9)) { + /* usage: add_buddy [buddy key] [buddy public key] */ + + } else if (!strncmp(line, "get_buddy_message", 17)) { + /* usage: get_buddy_message [buddy key] [buddy message_id] */ + + + } else if (!strncmp(line, "search", 6)) { + if ( line_len > 7 ) { + unsigned char hash[20]; + memcpy(hash, line + 7, 20); + + if(globals.s >= 0) { + dht_search(h, hash, 0, AF_INET, callback, NULL); + } + } else { + printf("Your search string isn't a valid 20 character hash. You entered [%.*s] of length %d\n", line_len - 7, line + 7, line_len - 7); + } + } else if (!strncmp(line, "announce", 8)) { + if ( line_len == 29 ) { + unsigned char hash[20]; + memcpy(hash, line + 9, 20); + + if(globals.s >= 0) { + dht_search(h, hash, globals.port, AF_INET, callback, NULL); + } + } else { + printf("Your search string isn't a valid 20 character hash. You entered [%.*s]\n", line_len - 7, line + 7); + } + } else { + printf("Unknown command entered[%.*s]\n", line_len, line); + } + + free(cmd_dup); + } + } + + { + struct sockaddr_in sin[500]; + struct sockaddr_in6 sin6[500]; + int num = 500, num6 = 500; + int i; + i = dht_get_nodes(h, sin, &num, sin6, &num6); + printf("Found %d (%d + %d) good nodes.\n", i, num, num6); + } + + + history_end(myhistory); + el_end(el); + dht_uninit(&h); + ks_shutdown(); + return 0; + + usage: + printf("Usage: dht-example [-a] [-4] [-6] [-p ] [-b [+]]...\n" + " [-B [+]]...\n"); + exit(0); +} + + +/* For Emacs: + * Local Variables: + * mode:c + * indent-tabs-mode:t + * tab-width:4 + * c-basic-offset:4 + * End: + * For VIM: + * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet: + */ diff --git a/libs/libks/test/libtorrent-example.c b/libs/libks/test/libtorrent-example.c new file mode 100644 index 0000000000..f1165f4c7d --- /dev/null +++ b/libs/libks/test/libtorrent-example.c @@ -0,0 +1,120 @@ +/* This example code was written by Juliusz Chroboczek. + You are free to cut'n'paste from it to your heart's content. 
*/ + +/* For crypt */ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ks.h" +#include "histedit.h" +#include "libtorrent.h" + +static char * prompt(EditLine *e) { + return "dht> "; +} + +typedef struct dht_globals_s { + int exiting; +} dht_globals_t; + +int +main(int argc, char **argv) +{ + dht_globals_t globals = {0}; + int opt; + EditLine *el; + History *myhistory; + int count; + const char *line; + HistEvent ev; + ks_status_t status = KS_STATUS_SUCCESS; + // ks_pool_t *pool; + void *session = session_create(SES_LISTENPORT, 8090, + SES_LISTENPORT_END, 8098, + TAG_END); + + session_start_dht(session); + + + globals.exiting = 0; + + el = el_init("test", stdin, stdout, stderr); + el_set(el, EL_PROMPT, &prompt); + el_set(el, EL_EDITOR, "emacs"); + myhistory = history_init(); + history(myhistory, &ev, H_SETSIZE, 800); + el_set(el, EL_HIST, history, myhistory); + + ks_global_set_default_logger(7); + + while(1) { + opt = getopt(argc, argv, "hb:"); + if(opt < 0) + break; + + switch(opt) { + case 'b': { + printf("Not yet implemented\n"); + goto usage; + } + break; + default: + goto usage; + } + } + + /* + ks_pool_open(&pool); + status = ks_thread_create_ex(&threads[0], dht_event_thread, &globals, KS_THREAD_FLAG_DETATCHED, KS_THREAD_DEFAULT_STACK, KS_PRI_NORMAL, pool); + */ + + if ( status != KS_STATUS_SUCCESS) { + printf("Failed to start DHT event thread\n"); + exit(1); + } + + while ( !globals.exiting ) { + line = el_gets(el, &count); + + if (count > 1) { + int line_len = (int)strlen(line) - 1; + history(myhistory, &ev, H_ENTER, line); + + if (!strncmp(line, "quit", 4)) { + globals.exiting = 1; + } else if (!strncmp(line, "loglevel", 8)) { + ks_global_set_default_logger(atoi(line + 9)); + } else if (!strncmp(line, "peer_dump", 9)) { + printf("Not yet implemented\n"); + } else if (!strncmp(line, "search", 6)) { + printf("Not yet implemented\n"); + } else if (!strncmp(line, "announce", 8)) { + printf("Not yet implemented\n"); + } else { + printf("Unknown command entered[%.*s]\n", line_len, line); + } + } + } + + history_end(myhistory); + el_end(el); + session_close(session); + return 0; + + usage: + printf("Usage: dht-example [-4] [-6] [-i filename] [-b address]...\n" + " port [address port]...\n"); + exit(1); +} diff --git a/libs/libks/test/libtorrent.cpp b/libs/libks/test/libtorrent.cpp new file mode 100644 index 0000000000..e03587ac6a --- /dev/null +++ b/libs/libks/test/libtorrent.cpp @@ -0,0 +1,601 @@ +/* + +Copyright (c) 2009, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include "libtorrent/session.hpp" +#include "libtorrent/magnet_uri.hpp" +#include "libtorrent/torrent_handle.hpp" +#include + +#include +#include + +namespace +{ + std::vector handles; + + int find_handle(libtorrent::torrent_handle h) + { + std::vector::const_iterator i + = std::find(handles.begin(), handles.end(), h); + if (i == handles.end()) return -1; + return i - handles.begin(); + } + + libtorrent::torrent_handle get_handle(int i) + { + if (i < 0 || i >= int(handles.size())) return libtorrent::torrent_handle(); + return handles[i]; + } + + int add_handle(libtorrent::torrent_handle const& h) + { + std::vector::iterator i = std::find_if(handles.begin() + , handles.end(), !boost::bind(&libtorrent::torrent_handle::is_valid, _1)); + if (i != handles.end()) + { + *i = h; + return i - handles.begin(); + } + + handles.push_back(h); + return handles.size() - 1; + } + + int set_int_value(void* dst, int* size, int val) + { + if ( *size < (int) sizeof(int)) return -2; + *((int*)dst) = val; + *size = (int) sizeof(int); + return 0; + } + + void copy_proxy_setting(libtorrent::proxy_settings* s, proxy_setting const* ps) + { + s->hostname.assign(ps->hostname); + s->port = ps->port; + s->username.assign(ps->username); + s->password.assign(ps->password); + s->type = (libtorrent::proxy_settings::proxy_type)ps->type; + } +} + +extern "C" +{ + +TORRENT_EXPORT void* session_create(int tag, ...) 
+{ + using namespace libtorrent; + + va_list lp; + va_start(lp, tag); + + fingerprint fing("LT", LIBTORRENT_VERSION_MAJOR, LIBTORRENT_VERSION_MINOR, 0, 0); + std::pair listen_range(-1, -1); + char const* listen_interface = "0.0.0.0"; + int flags = session::start_default_features | session::add_default_plugins; + int alert_mask = alert::error_notification; + + while (tag != TAG_END) + { + switch (tag) + { + case SES_FINGERPRINT: + { + char const* f = va_arg(lp, char const*); + fing.name[0] = f[0]; + fing.name[1] = f[1]; + break; + } + case SES_LISTENPORT: + listen_range.first = va_arg(lp, int); + break; + case SES_LISTENPORT_END: + listen_range.second = va_arg(lp, int); + break; + case SES_VERSION_MAJOR: + fing.major_version = va_arg(lp, int); + break; + case SES_VERSION_MINOR: + fing.minor_version = va_arg(lp, int); + break; + case SES_VERSION_TINY: + fing.revision_version = va_arg(lp, int); + break; + case SES_VERSION_TAG: + fing.tag_version = va_arg(lp, int); + break; + case SES_FLAGS: + flags = va_arg(lp, int); + break; + case SES_ALERT_MASK: + alert_mask = va_arg(lp, int); + break; + case SES_LISTEN_INTERFACE: + listen_interface = va_arg(lp, char const*); + break; + default: + // skip unknown tags + va_arg(lp, void*); + break; + } + + tag = va_arg(lp, int); + } + + if (listen_range.first != -1 && (listen_range.second == -1 + || listen_range.second < listen_range.first)) + listen_range.second = listen_range.first; + + return new (std::nothrow) session(fing, listen_range, listen_interface, flags, alert_mask); +} + +TORRENT_EXPORT void session_close(void* ses) +{ + delete (libtorrent::session*)ses; +} + +TORRENT_EXPORT void session_start_dht(void *ses) +{ + using namespace libtorrent; + session *s = (session*) ses; + + s->start_dht(); +} + + + /* +TORRENT_EXPORT int session_add_torrent(void* ses, int tag, ...) 
+{ + using namespace libtorrent; + + va_list lp; + va_start(lp, tag); + session* s = (session*)ses; + add_torrent_params params; + + char const* torrent_data = 0; + int torrent_size = 0; + + char const* resume_data = 0; + int resume_size = 0; + + char const* magnet_url = 0; + + error_code ec; + + while (tag != TAG_END) + { + switch (tag) + { + case TOR_FILENAME: + params.ti = new (std::nothrow) torrent_info(va_arg(lp, char const*), ec); + break; + case TOR_TORRENT: + torrent_data = va_arg(lp, char const*); + break; + case TOR_TORRENT_SIZE: + torrent_size = va_arg(lp, int); + break; + case TOR_INFOHASH: + params.ti = new (std::nothrow) torrent_info(sha1_hash(va_arg(lp, char const*))); + break; + case TOR_INFOHASH_HEX: + { + sha1_hash ih; + from_hex(va_arg(lp, char const*), 40, (char*)&ih[0]); + params.ti = new (std::nothrow) torrent_info(ih); + break; + } + case TOR_MAGNETLINK: + magnet_url = va_arg(lp, char const*); + break; + case TOR_TRACKER_URL: + params.tracker_url = va_arg(lp, char const*); + break; + case TOR_RESUME_DATA: + resume_data = va_arg(lp, char const*); + break; + case TOR_RESUME_DATA_SIZE: + resume_size = va_arg(lp, int); + break; + case TOR_SAVE_PATH: + params.save_path = va_arg(lp, char const*); + break; + case TOR_NAME: + params.name = va_arg(lp, char const*); + break; + case TOR_PAUSED: + params.paused = va_arg(lp, int) != 0; + break; + case TOR_AUTO_MANAGED: + params.auto_managed = va_arg(lp, int) != 0; + break; + case TOR_DUPLICATE_IS_ERROR: + params.duplicate_is_error = va_arg(lp, int) != 0; + break; + case TOR_USER_DATA: + params.userdata = va_arg(lp, void*); + break; + case TOR_SEED_MODE: + params.seed_mode = va_arg(lp, int) != 0; + break; + case TOR_OVERRIDE_RESUME_DATA: + params.override_resume_data = va_arg(lp, int) != 0; + break; + case TOR_STORAGE_MODE: + params.storage_mode = (libtorrent::storage_mode_t)va_arg(lp, int); + break; + default: + // ignore unknown tags + va_arg(lp, void*); + break; + } + + tag = va_arg(lp, int); + } + + if (!params.ti && torrent_data && torrent_size) + params.ti = new (std::nothrow) torrent_info(torrent_data, torrent_size); + + std::vector rd; + if (resume_data && resume_size) + { + rd.assign(resume_data, resume_data + resume_size); + params.resume_data = &rd; + } + torrent_handle h; + if (!params.ti && magnet_url) + { + h = add_magnet_uri(*s, magnet_url, params, ec); + } + else + { + h = s->add_torrent(params, ec); + } + + if (!h.is_valid()) + { + return -1; + } + + int i = find_handle(h); + if (i == -1) i = add_handle(h); + + return i; +} + */ +void session_remove_torrent(void* ses, int tor, int flags) +{ + using namespace libtorrent; + torrent_handle h = get_handle(tor); + if (!h.is_valid()) return; + + session* s = (session*)ses; + s->remove_torrent(h, flags); +} + +int session_set_settings(void* ses, int tag, ...) 
+{ + using namespace libtorrent; + + // session* s = (session*)ses; + + va_list lp; + va_start(lp, tag); + + while (tag != TAG_END) + { + switch (tag) + { + /* + case SET_UPLOAD_RATE_LIMIT: + s->set_upload_rate_limit(va_arg(lp, int)); + break; + case SET_DOWNLOAD_RATE_LIMIT: + s->set_download_rate_limit(va_arg(lp, int)); + break; + case SET_LOCAL_UPLOAD_RATE_LIMIT: + s->set_local_upload_rate_limit(va_arg(lp, int)); + break; + case SET_LOCAL_DOWNLOAD_RATE_LIMIT: + s->set_local_download_rate_limit(va_arg(lp, int)); + break; + case SET_MAX_UPLOAD_SLOTS: + s->set_max_uploads(va_arg(lp, int)); + break; + case SET_MAX_CONNECTIONS: + s->set_max_connections(va_arg(lp, int)); + break; + case SET_HALF_OPEN_LIMIT: + s->set_max_half_open_connections(va_arg(lp, int)); + break; + case SET_PEER_PROXY: + { + libtorrent::proxy_settings ps; + copy_proxy_setting(&ps, va_arg(lp, struct proxy_setting const*)); + s->set_peer_proxy(ps); + } + case SET_WEB_SEED_PROXY: + { + libtorrent::proxy_settings ps; + copy_proxy_setting(&ps, va_arg(lp, struct proxy_setting const*)); + s->set_web_seed_proxy(ps); + } + case SET_TRACKER_PROXY: + { + libtorrent::proxy_settings ps; + copy_proxy_setting(&ps, va_arg(lp, struct proxy_setting const*)); + s->set_tracker_proxy(ps); + } +#ifndef TORRENT_DISABLE_DHT + case SET_DHT_PROXY: + { + libtorrent::proxy_settings ps; + copy_proxy_setting(&ps, va_arg(lp, struct proxy_setting const*)); + s->set_dht_proxy(ps); + } +#endif + case SET_PROXY: + { + libtorrent::proxy_settings ps; + copy_proxy_setting(&ps, va_arg(lp, struct proxy_setting const*)); + s->set_peer_proxy(ps); + s->set_web_seed_proxy(ps); + s->set_tracker_proxy(ps); +#ifndef TORRENT_DISABLE_DHT + s->set_dht_proxy(ps); +#endif + } + */ + default: + // ignore unknown tags + va_arg(lp, void*); + break; + } + + tag = va_arg(lp, int); + } + return 0; +} + +int session_get_setting(void* ses, int tag, void* value, int* value_size) +{ + using namespace libtorrent; + // session* s = (session*)ses; + + switch (tag) + { + /* + case SET_UPLOAD_RATE_LIMIT: + return set_int_value(value, value_size, s->upload_rate_limit()); + case SET_DOWNLOAD_RATE_LIMIT: + return set_int_value(value, value_size, s->download_rate_limit()); + case SET_LOCAL_UPLOAD_RATE_LIMIT: + return set_int_value(value, value_size, s->local_upload_rate_limit()); + case SET_LOCAL_DOWNLOAD_RATE_LIMIT: + return set_int_value(value, value_size, s->local_download_rate_limit()); + case SET_MAX_UPLOAD_SLOTS: + return set_int_value(value, value_size, s->max_uploads()); + case SET_MAX_CONNECTIONS: + return set_int_value(value, value_size, s->max_connections()); + case SET_HALF_OPEN_LIMIT: + return set_int_value(value, value_size, s->max_half_open_connections()); + */ + default: + return -2; + } +} + +int session_get_status(void* sesptr, struct session_status* s, int struct_size) +{ + libtorrent::session* ses = (libtorrent::session*)sesptr; + + libtorrent::session_status ss = ses->status(); + if (struct_size != sizeof(session_status)) return -1; + + s->has_incoming_connections = ss.has_incoming_connections; + + s->upload_rate = ss.upload_rate; + s->download_rate = ss.download_rate; + s->total_download = ss.total_download; + s->total_upload = ss.total_upload; + + s->payload_upload_rate = ss.payload_upload_rate; + s->payload_download_rate = ss.payload_download_rate; + s->total_payload_download = ss.total_payload_download; + s->total_payload_upload = ss.total_payload_upload; + + s->ip_overhead_upload_rate = ss.ip_overhead_upload_rate; + s->ip_overhead_download_rate = 
ss.ip_overhead_download_rate; + s->total_ip_overhead_download = ss.total_ip_overhead_download; + s->total_ip_overhead_upload = ss.total_ip_overhead_upload; + + s->dht_upload_rate = ss.dht_upload_rate; + s->dht_download_rate = ss.dht_download_rate; + s->total_dht_download = ss.total_dht_download; + s->total_dht_upload = ss.total_dht_upload; + + s->tracker_upload_rate = ss.tracker_upload_rate; + s->tracker_download_rate = ss.tracker_download_rate; + s->total_tracker_download = ss.total_tracker_download; + s->total_tracker_upload = ss.total_tracker_upload; + + s->total_redundant_bytes = ss.total_redundant_bytes; + s->total_failed_bytes = ss.total_failed_bytes; + + s->num_peers = ss.num_peers; + s->num_unchoked = ss.num_unchoked; + s->allowed_upload_slots = ss.allowed_upload_slots; + + s->up_bandwidth_queue = ss.up_bandwidth_queue; + s->down_bandwidth_queue = ss.down_bandwidth_queue; + + s->up_bandwidth_bytes_queue = ss.up_bandwidth_bytes_queue; + s->down_bandwidth_bytes_queue = ss.down_bandwidth_bytes_queue; + + s->optimistic_unchoke_counter = ss.optimistic_unchoke_counter; + s->unchoke_counter = ss.unchoke_counter; + + s->dht_nodes = ss.dht_nodes; + s->dht_node_cache = ss.dht_node_cache; + s->dht_torrents = ss.dht_torrents; + s->dht_global_nodes = ss.dht_global_nodes; + return 0; +} + +int torrent_get_status(int tor, torrent_status* s, int struct_size) +{ + libtorrent::torrent_handle h = get_handle(tor); + if (!h.is_valid()) return -1; + + libtorrent::torrent_status ts = h.status(); + + if (struct_size != sizeof(torrent_status)) return -1; + + s->state = (state_t)ts.state; + s->paused = ts.paused; + s->progress = ts.progress; + strncpy(s->error, ts.error.c_str(), 1025); + s->next_announce = ts.next_announce.total_seconds(); + s->announce_interval = ts.announce_interval.total_seconds(); + strncpy(s->current_tracker, ts.current_tracker.c_str(), 512); + s->total_download = ts.total_download = ts.total_download = ts.total_download; + s->total_upload = ts.total_upload = ts.total_upload = ts.total_upload; + s->total_payload_download = ts.total_payload_download; + s->total_payload_upload = ts.total_payload_upload; + s->total_failed_bytes = ts.total_failed_bytes; + s->total_redundant_bytes = ts.total_redundant_bytes; + s->download_rate = ts.download_rate; + s->upload_rate = ts.upload_rate; + s->download_payload_rate = ts.download_payload_rate; + s->upload_payload_rate = ts.upload_payload_rate; + s->num_seeds = ts.num_seeds; + s->num_peers = ts.num_peers; + s->num_complete = ts.num_complete; + s->num_incomplete = ts.num_incomplete; + s->list_seeds = ts.list_seeds; + s->list_peers = ts.list_peers; + s->connect_candidates = ts.connect_candidates; + s->num_pieces = ts.num_pieces; + s->total_done = ts.total_done; + s->total_wanted_done = ts.total_wanted_done; + s->total_wanted = ts.total_wanted; + s->distributed_copies = ts.distributed_copies; + s->block_size = ts.block_size; + s->num_uploads = ts.num_uploads; + s->num_connections = ts.num_connections; + s->uploads_limit = ts.uploads_limit; + s->connections_limit = ts.connections_limit; +// s->storage_mode = (storage_mode_t)ts.storage_mode; + s->up_bandwidth_queue = ts.up_bandwidth_queue; + s->down_bandwidth_queue = ts.down_bandwidth_queue; + s->all_time_upload = ts.all_time_upload; + s->all_time_download = ts.all_time_download; + s->active_time = ts.active_time; + s->seeding_time = ts.seeding_time; + s->seed_rank = ts.seed_rank; + s->last_scrape = ts.last_scrape; + s->has_incoming = ts.has_incoming; + s->sparse_regions = ts.sparse_regions; + 
s->seed_mode = ts.seed_mode; + return 0; +} + +int torrent_set_settings(int tor, int tag, ...) +{ + using namespace libtorrent; + torrent_handle h = get_handle(tor); + if (!h.is_valid()) return -1; + + va_list lp; + va_start(lp, tag); + + while (tag != TAG_END) + { + switch (tag) + { + case SET_UPLOAD_RATE_LIMIT: + h.set_upload_limit(va_arg(lp, int)); + break; + case SET_DOWNLOAD_RATE_LIMIT: + h.set_download_limit(va_arg(lp, int)); + break; + case SET_MAX_UPLOAD_SLOTS: + h.set_max_uploads(va_arg(lp, int)); + break; + case SET_MAX_CONNECTIONS: + h.set_max_connections(va_arg(lp, int)); + break; + case SET_SEQUENTIAL_DOWNLOAD: + h.set_sequential_download(va_arg(lp, int) != 0); + break; + case SET_SUPER_SEEDING: + h.super_seeding(va_arg(lp, int) != 0); + break; + default: + // ignore unknown tags + va_arg(lp, void*); + break; + } + + tag = va_arg(lp, int); + } + return 0; +} + +int torrent_get_setting(int tor, int tag, void* value, int* value_size) +{ + using namespace libtorrent; + torrent_handle h = get_handle(tor); + if (!h.is_valid()) return -1; + + switch (tag) + { + case SET_UPLOAD_RATE_LIMIT: + return set_int_value(value, value_size, h.upload_limit()); + case SET_DOWNLOAD_RATE_LIMIT: + return set_int_value(value, value_size, h.download_limit()); + case SET_MAX_UPLOAD_SLOTS: + return set_int_value(value, value_size, h.max_uploads()); + case SET_MAX_CONNECTIONS: + return set_int_value(value, value_size, h.max_connections()); + case SET_SEQUENTIAL_DOWNLOAD: + return set_int_value(value, value_size, h.is_sequential_download()); + case SET_SUPER_SEEDING: + return set_int_value(value, value_size, h.super_seeding()); + default: + return -2; + } +} + +} // extern "C" + diff --git a/libs/libks/test/libtorrent.h b/libs/libks/test/libtorrent.h new file mode 100644 index 0000000000..57cfa0ed47 --- /dev/null +++ b/libs/libks/test/libtorrent.h @@ -0,0 +1,278 @@ +/* + +Copyright (c) 2009, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef LIBTORRENT_H +#define LIBTORRENT_H + +enum tags +{ + TAG_END = 0, + + SES_FINGERPRINT, // char const*, 2 character string + SES_LISTENPORT, // int + SES_LISTENPORT_END, // int + SES_VERSION_MAJOR, // int + SES_VERSION_MINOR, // int + SES_VERSION_TINY, // int + SES_VERSION_TAG, // int + SES_FLAGS, // int + SES_ALERT_MASK, // int + SES_LISTEN_INTERFACE, // char const* + + // === add_torrent tags === + + // identifying the torrent to add + TOR_FILENAME = 0x100, // char const* + TOR_TORRENT, // char const*, specify size of buffer with TOR_TORRENT_SIZE + TOR_TORRENT_SIZE, // int + TOR_INFOHASH, // char const*, must point to a 20 byte array + TOR_INFOHASH_HEX, // char const*, must point to a 40 byte string + TOR_MAGNETLINK, // char const*, url + + TOR_TRACKER_URL, // char const* + TOR_RESUME_DATA, // char const* + TOR_RESUME_DATA_SIZE, // int + TOR_SAVE_PATH, // char const* + TOR_NAME, // char const* + TOR_PAUSED, // int + TOR_AUTO_MANAGED, // int + TOR_DUPLICATE_IS_ERROR, // int + TOR_USER_DATA, //void* + TOR_SEED_MODE, // int + TOR_OVERRIDE_RESUME_DATA, // int + TOR_STORAGE_MODE, // int + + SET_UPLOAD_RATE_LIMIT = 0x200, // int + SET_DOWNLOAD_RATE_LIMIT, // int + SET_LOCAL_UPLOAD_RATE_LIMIT, // int + SET_LOCAL_DOWNLOAD_RATE_LIMIT, // int + SET_MAX_UPLOAD_SLOTS, // int + SET_MAX_CONNECTIONS, // int + SET_SEQUENTIAL_DOWNLOAD, // int, torrent only + SET_SUPER_SEEDING, // int, torrent only + SET_HALF_OPEN_LIMIT, // int, session only + SET_PEER_PROXY, // proxy_setting const*, session_only + SET_WEB_SEED_PROXY, // proxy_setting const*, session_only + SET_TRACKER_PROXY, // proxy_setting const*, session_only + SET_DHT_PROXY, // proxy_setting const*, session_only + SET_PROXY, // proxy_setting const*, session_only +}; + +struct proxy_setting +{ + char hostname[256]; + int port; + + char username[256]; + char password[256]; + + int type; +}; + +enum proxy_type_t +{ + proxy_none, + proxy_socks4, + proxy_socks5, + proxy_socks5_pw, + proxy_http, + proxy_http_pw +}; + +enum storage_mode_t +{ + storage_mode_allocate = 0, + storage_mode_sparse, + storage_mode_compact +}; + +enum state_t +{ + queued_for_checking, + checking_files, + downloading_metadata, + downloading, + finished, + seeding, + allocating, + checking_resume_data +}; + +struct torrent_status +{ + enum state_t state; + int paused; + float progress; + char error[1024]; + int next_announce; + int announce_interval; + char current_tracker[512]; + long long total_download; + long long total_upload; + long long total_payload_download; + long long total_payload_upload; + long long total_failed_bytes; + long long total_redundant_bytes; + float download_rate; + float upload_rate; + float download_payload_rate; + float upload_payload_rate; + int num_seeds; + int num_peers; + int num_complete; + int num_incomplete; + int list_seeds; + int list_peers; + int connect_candidates; + + // what to do? 
+// bitfield pieces; + + int num_pieces; + long long total_done; + long long total_wanted_done; + long long total_wanted; + float distributed_copies; + int block_size; + int num_uploads; + int num_connections; + int uploads_limit; + int connections_limit; +// enum storage_mode_t storage_mode; + int up_bandwidth_queue; + int down_bandwidth_queue; + long long all_time_upload; + long long all_time_download; + int active_time; + int seeding_time; + int seed_rank; + int last_scrape; + int has_incoming; + int sparse_regions; + int seed_mode; +}; + +struct session_status +{ + int has_incoming_connections; + + float upload_rate; + float download_rate; + long long total_download; + long long total_upload; + + float payload_upload_rate; + float payload_download_rate; + long long total_payload_download; + long long total_payload_upload; + + float ip_overhead_upload_rate; + float ip_overhead_download_rate; + long long total_ip_overhead_download; + long long total_ip_overhead_upload; + + float dht_upload_rate; + float dht_download_rate; + long long total_dht_download; + long long total_dht_upload; + + float tracker_upload_rate; + float tracker_download_rate; + long long total_tracker_download; + long long total_tracker_upload; + + long long total_redundant_bytes; + long long total_failed_bytes; + + int num_peers; + int num_unchoked; + int allowed_upload_slots; + + int up_bandwidth_queue; + int down_bandwidth_queue; + + int up_bandwidth_bytes_queue; + int down_bandwidth_bytes_queue; + + int optimistic_unchoke_counter; + int unchoke_counter; + + int dht_nodes; + int dht_node_cache; + int dht_torrents; + long long dht_global_nodes; +// std::vector active_requests; +}; + +#ifdef __cplusplus +extern "C" +{ +#endif + +// the functions whose signature ends with: +// , int first_tag, ...); +// takes a tag list. The tag list is a series +// of tag-value pairs. The tags are constants +// identifying which property the value controls. +// The type of the value varies between tags. +// The enumeration above specifies which type +// it expects. All tag lists must always be +// terminated by TAG_END. 
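+
+// Illustrative sketch of the tag-list convention described above. The torrent
+// file name, save path, ports and rate values below are placeholders, not
+// values required by this API:
+//
+//   void* ses = session_create(
+//       SES_LISTENPORT, 6881,
+//       SES_LISTENPORT_END, 6889,
+//       TAG_END);
+//
+//   int tor = session_add_torrent(ses,
+//       TOR_FILENAME, "example.torrent",
+//       TOR_SAVE_PATH, "./downloads",
+//       TOR_PAUSED, 0,
+//       TAG_END);
+//
+//   torrent_set_settings(tor,
+//       SET_DOWNLOAD_RATE_LIMIT, 250000,
+//       SET_SEQUENTIAL_DOWNLOAD, 1,
+//       TAG_END);
+//
+//   session_remove_torrent(ses, tor, 0);
+//   session_close(ses);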
+ +// use SES_* tags in tag list +void* session_create(int first_tag, ...); +void session_close(void* ses); + +void session_start_dht(void *ses); + +// use TOR_* tags in tag list +int session_add_torrent(void* ses, int first_tag, ...); +void session_remove_torrent(void* ses, int tor, int flags); + +int session_get_status(void* ses, struct session_status* s, int struct_size); + +// use SET_* tags in tag list +int session_set_settings(void* ses, int first_tag, ...); +int session_get_setting(void* ses, int tag, void* value, int* value_size); + +int torrent_get_status(int tor, struct torrent_status* s, int struct_size); + +// use SET_* tags in tag list +int torrent_set_settings(int tor, int first_tag, ...); +int torrent_get_setting(int tor, int tag, void* value, int* value_size); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/libs/libks/test/tap.c b/libs/libks/test/tap.c new file mode 100644 index 0000000000..152e39e8e0 --- /dev/null +++ b/libs/libks/test/tap.c @@ -0,0 +1,354 @@ +/* +libtap - Write tests in C +Copyright 2012 Jake Gelbman +This file is licensed under the LGPL +*/ + +#define _DEFAULT_SOURCE 1 + +#include +#include +#include +#include +#include "tap.h" + +static int expected_tests = NO_PLAN; +static int failed_tests; +static int current_test; +static char *todo_mesg; + +static char * +vstrdupf (const char *fmt, va_list args) { + char *str; + int size; + va_list args2; + va_copy(args2, args); + if (!fmt) + fmt = ""; + size = vsnprintf(NULL, 0, fmt, args2) + 2; + str = malloc(size); + if (!str) { + perror("malloc error"); + exit(1); + } + vsprintf(str, fmt, args); + va_end(args2); + return str; +} + +void +tap_plan (int tests, const char *fmt, ...) { + expected_tests = tests; + if (tests == SKIP_ALL) { + char *why; + va_list args; + va_start(args, fmt); + why = vstrdupf(fmt, args); + va_end(args); + printf("1..0 "); + diag("SKIP %s\n", why); + exit(0); + } + if (tests != NO_PLAN) { + printf("1..%d\n", tests); + } +} + +int +vok_at_loc (const char *file, int line, int test, const char *fmt, + va_list args) +{ + char *name = vstrdupf(fmt, args); + if (!test) { + printf("not "); + } + printf("ok %d", ++current_test); + if (*name) + printf(" - %s", name); + if (todo_mesg) { + printf(" # TODO"); + if (*todo_mesg) + printf(" %s", todo_mesg); + } + printf("\n"); + if (!test) { + printf("# Failed "); + if (todo_mesg) + printf("(TODO) "); + printf("test "); + if (*name) + printf("'%s'\n# ", name); + printf("at %s line %d.\n", file, line); + if (!todo_mesg) + failed_tests++; + } + free(name); + return test; +} + +int +ok_at_loc (const char *file, int line, int test, const char *fmt, ...) { + va_list args; + va_start(args, fmt); + vok_at_loc(file, line, test, fmt, args); + va_end(args); + return test; +} + +static int +mystrcmp (const char *a, const char *b) { + return a == b ? 0 : !a ? -1 : !b ? 1 : strcmp(a, b); +} + +#define eq(a, b) (!mystrcmp(a, b)) +#define ne(a, b) (mystrcmp(a, b)) + +int +is_at_loc (const char *file, int line, const char *got, const char *expected, + const char *fmt, ...) +{ + int test = eq(got, expected); + va_list args; + va_start(args, fmt); + vok_at_loc(file, line, test, fmt, args); + va_end(args); + if (!test) { + diag(" got: '%s'", got); + diag(" expected: '%s'", expected); + } + return test; +} + +int +isnt_at_loc (const char *file, int line, const char *got, const char *expected, + const char *fmt, ...) 
+{ + int test = ne(got, expected); + va_list args; + va_start(args, fmt); + vok_at_loc(file, line, test, fmt, args); + va_end(args); + if (!test) { + diag(" got: '%s'", got); + diag(" expected: anything else"); + } + return test; +} + +int +cmp_ok_at_loc (const char *file, int line, int a, const char *op, int b, + const char *fmt, ...) +{ + int test = eq(op, "||") ? a || b + : eq(op, "&&") ? a && b + : eq(op, "|") ? a | b + : eq(op, "^") ? a ^ b + : eq(op, "&") ? a & b + : eq(op, "==") ? a == b + : eq(op, "!=") ? a != b + : eq(op, "<") ? a < b + : eq(op, ">") ? a > b + : eq(op, "<=") ? a <= b + : eq(op, ">=") ? a >= b + : eq(op, "<<") ? a << b + : eq(op, ">>") ? a >> b + : eq(op, "+") ? a + b + : eq(op, "-") ? a - b + : eq(op, "*") ? a * b + : eq(op, "/") ? a / b + : eq(op, "%") ? a % b + : diag("unrecognized operator '%s'", op); + va_list args; + va_start(args, fmt); + vok_at_loc(file, line, test, fmt, args); + va_end(args); + if (!test) { + diag(" %d", a); + diag(" %s", op); + diag(" %d", b); + } + return test; +} + +static int +find_mem_diff (const char *a, const char *b, size_t n, size_t *offset) { + size_t i; + if (a == b) + return 0; + if (!a || !b) + return 2; + for (i = 0; i < n; i++) { + if (a[i] != b[i]) { + *offset = i; + return 1; + } + } + return 0; +} + +int +cmp_mem_at_loc (const char *file, int line, const void *got, + const void *expected, size_t n, const char *fmt, ...) +{ + size_t offset; + int diff = find_mem_diff(got, expected, n, &offset); + va_list args; + va_start(args, fmt); + vok_at_loc(file, line, !diff, fmt, args); + va_end(args); + if (diff == 1) { + diag(" Difference starts at offset %d", offset); + diag(" got: 0x%02x", ((unsigned char *)got)[offset]); + diag(" expected: 0x%02x", ((unsigned char *)expected)[offset]); + } + else if (diff == 2) { + diag(" got: %s", got ? "not NULL" : "NULL"); + diag(" expected: %s", expected ? "not NULL" : "NULL"); + } + return !diff; +} + +int +diag (const char *fmt, ...) { + va_list args; + char *mesg, *line; + int i; + va_start(args, fmt); + if (!fmt) + return 0; + mesg = vstrdupf(fmt, args); + line = mesg; + for (i = 0; *line; i++) { + char c = mesg[i]; + if (!c || c == '\n') { + mesg[i] = '\0'; + printf("# %s\n", line); + if (!c) + break; + mesg[i] = c; + line = mesg + i + 1; + } + } + free(mesg); + va_end(args); + return 0; +} + +int +exit_status () { + int retval = 0; + if (expected_tests == NO_PLAN) { + printf("1..%d\n", current_test); + } + else if (current_test != expected_tests) { + diag("Looks like you planned %d test%s but ran %d.", + expected_tests, expected_tests > 1 ? "s" : "", current_test); + retval = 2; + } + if (failed_tests) { + diag("Looks like you failed %d test%s of %d run.", + failed_tests, failed_tests > 1 ? "s" : "", current_test); + retval = 1; + } + return retval; +} + +int +bail_out (int ignore, const char *fmt, ...) { + va_list args; + va_start(args, fmt); + printf("Bail out! "); + vprintf(fmt, args); + printf("\n"); + va_end(args); + exit(255); + return 0; +} + +void +tap_skip (int n, const char *fmt, ...) { + char *why; + va_list args; + va_start(args, fmt); + why = vstrdupf(fmt, args); + va_end(args); + while (n --> 0) { + printf("ok %d ", ++current_test); + diag("skip %s\n", why); + } + free(why); +} + +void +tap_todo (int ignore, const char *fmt, ...) 
{ + va_list args; + va_start(args, fmt); + todo_mesg = vstrdupf(fmt, args); + va_end(args); +} + +void +tap_end_todo () { + free(todo_mesg); + todo_mesg = NULL; +} + +#ifndef _WIN32 +#include +#include +#include + +#if defined __APPLE__ || defined BSD +#define MAP_ANONYMOUS MAP_ANON +#endif + +/* Create a shared memory int to keep track of whether a piece of code executed +dies. to be used in the dies_ok and lives_ok macros. */ +int +tap_test_died (int status) { + static int *test_died = NULL; + int prev; + if (!test_died) { + test_died = mmap(0, sizeof (int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *test_died = 0; + } + prev = *test_died; + *test_died = status; + return prev; +} + +int +like_at_loc (int for_match, const char *file, int line, const char *got, + const char *expected, const char *fmt, ...) +{ + int test; + regex_t re; + va_list args; + int err = regcomp(&re, expected, REG_EXTENDED); + if (err) { + char errbuf[256]; + regerror(err, &re, errbuf, sizeof errbuf); + fprintf(stderr, "Unable to compile regex '%s': %s at %s line %d\n", + expected, errbuf, file, line); + exit(255); + } + err = regexec(&re, got, 0, NULL, 0); + regfree(&re); + test = for_match ? !err : err; + va_start(args, fmt); + vok_at_loc(file, line, test, fmt, args); + va_end(args); + if (!test) { + if (for_match) { + diag(" '%s'", got); + diag(" doesn't match: '%s'", expected); + } + else { + diag(" '%s'", got); + diag(" matches: '%s'", expected); + } + } + return test; +} +#endif diff --git a/libs/libks/test/tap.h b/libs/libks/test/tap.h new file mode 100644 index 0000000000..8269e7ead7 --- /dev/null +++ b/libs/libks/test/tap.h @@ -0,0 +1,115 @@ +/* +libtap - Write tests in C +Copyright 2012 Jake Gelbman +This file is licensed under the LGPL +*/ + +#ifndef __TAP_H__ +#define __TAP_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef va_copy +#ifdef __va_copy +#define va_copy __va_copy +#else +#define va_copy(d, s) ((d) = (s)) +#endif +#endif + +#include +#include +#include + +int vok_at_loc (const char *file, int line, int test, const char *fmt, + va_list args); +int ok_at_loc (const char *file, int line, int test, const char *fmt, + ...); +int is_at_loc (const char *file, int line, const char *got, + const char *expected, const char *fmt, ...); +int isnt_at_loc (const char *file, int line, const char *got, + const char *expected, const char *fmt, ...); +int cmp_ok_at_loc (const char *file, int line, int a, const char *op, + int b, const char *fmt, ...); +int cmp_mem_at_loc (const char *file, int line, const void *got, + const void *expected, size_t n, const char *fmt, ...); +int bail_out (int ignore, const char *fmt, ...); +void tap_plan (int tests, const char *fmt, ...); +int diag (const char *fmt, ...); +int exit_status (void); +void tap_skip (int n, const char *fmt, ...); +void tap_todo (int ignore, const char *fmt, ...); +void tap_end_todo (void); + +#define NO_PLAN -1 +#define SKIP_ALL -2 +#define ok(...) ok_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL) +#define is(...) is_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL) +#define isnt(...) isnt_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL) +#define cmp_ok(...) cmp_ok_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL) +#define cmp_mem(...) cmp_mem_at_loc(__FILE__, __LINE__, __VA_ARGS__, NULL); +#define plan(...) tap_plan(__VA_ARGS__, NULL) +#define done_testing() return exit_status() +#define BAIL_OUT(...) bail_out(0, "" __VA_ARGS__, NULL) +#define pass(...) ok(1, "" __VA_ARGS__) +#define fail(...) 
ok(0, "" __VA_ARGS__) + +#define skip(test, ...) do {if (test) {tap_skip(__VA_ARGS__, NULL); break;} +#define end_skip } while (0) + +#define todo(...) tap_todo(0, "" __VA_ARGS__, NULL) +#define end_todo tap_end_todo() + +#define dies_ok(...) dies_ok_common(1, __VA_ARGS__) +#define lives_ok(...) dies_ok_common(0, __VA_ARGS__) + +#ifdef _WIN32 +#define like(...) tap_skip(1, "like is not implemented on Windows") +#define unlike tap_skip(1, "unlike is not implemented on Windows") +#define dies_ok_common(...) \ + tap_skip(1, "Death detection is not supported on Windows") +#else +#define like(...) like_at_loc(1, __FILE__, __LINE__, __VA_ARGS__, NULL) +#define unlike(...) like_at_loc(0, __FILE__, __LINE__, __VA_ARGS__, NULL) +int like_at_loc (int for_match, const char *file, int line, + const char *got, const char *expected, + const char *fmt, ...); +#include +#include +#include +int tap_test_died (int status); +#define dies_ok_common(for_death, code, ...) \ + do { \ + int cpid; \ + int it_died; \ + tap_test_died(1); \ + cpid = fork(); \ + switch (cpid) { \ + case -1: \ + perror("fork error"); \ + exit(1); \ + case 0: \ + close(1); \ + close(2); \ + code \ + tap_test_died(0); \ + exit(0); \ + } \ + if (waitpid(cpid, NULL, 0) < 0) { \ + perror("waitpid error"); \ + exit(1); \ + } \ + it_died = tap_test_died(0); \ + if (!it_died) \ + {code} \ + ok(for_death ? it_died : !it_died, "" __VA_ARGS__); \ + } while (0) +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/libs/libks/test/testdht-msg.c b/libs/libks/test/testdht-msg.c new file mode 100644 index 0000000000..a49c0036d4 --- /dev/null +++ b/libs/libks/test/testdht-msg.c @@ -0,0 +1,23 @@ +#include +#include + +/* + Test should cover all cases of DHT message construction and parsing tests + * init ks + * init a client if required + * TODO: list out msg construction and parsing tests + * Immutable messages: + * 1. create and validate single immutable messages, and handle error cases such as too large, etc. + * Mutable messages: + * 1. Create message error cases + * 2. Create initial message cases + * 3. Update message cases + * 4. + * cleanup ks + */ + +int main() { + + + done_testing(); +} diff --git a/libs/libks/test/testdht-net.c b/libs/libks/test/testdht-net.c new file mode 100644 index 0000000000..06db097ec9 --- /dev/null +++ b/libs/libks/test/testdht-net.c @@ -0,0 +1,18 @@ +#include +#include + +/* + Test should cover all cases of DHT networking for ipv4 and ipv6 + * Find ip + * init 2 or more clients(with dedicated ports) + * add ip to clients + * TODO: list out nework specific tests. 
+ * shutdown clients + * cleanup ks + */ + +int main() { + + + done_testing(); +} diff --git a/libs/libks/test/testdht.c b/libs/libks/test/testdht.c new file mode 100644 index 0000000000..9a7968353b --- /dev/null +++ b/libs/libks/test/testdht.c @@ -0,0 +1,139 @@ +#include +#include + +/* + Test should cover end to end DHT functionality that isn't covered by a more specific test file + * Find ip + * init 2 or more clients(with dedicated ports) + * add ip to clients + * exchange peers between clients + * shutdown clients + * cleanup ks + */ + +int main() { + int err = 0; + char v4[48] = {0}, v6[48] = {0}; + int mask = 0, have_v4 = 0, have_v6 = 0; + int A_port = 5998, B_port = 5999; + dht_handle_t *A_h = NULL, *B_h = NULL; + ks_dht_af_flag_t af_flags = 0; + static ks_sockaddr_t bootstrap[1]; + + err = ks_init(); + ok(!err); + + err = ks_find_local_ip(v4, sizeof(v4), &mask, AF_INET, NULL); + ok(err == KS_STATUS_SUCCESS); + have_v4 = !zstr_buf(v4); + + err = ks_find_local_ip(v6, sizeof(v6), NULL, AF_INET6, NULL); + ok(err == KS_STATUS_SUCCESS); + + have_v6 = !zstr_buf(v6); + + ok(have_v4 || have_v6); + if (have_v4) { + af_flags |= KS_DHT_AF_INET4; + } + diag("Adding local bind ipv4 of (%s) %d\n", v4, have_v4); + + if (have_v6) { + af_flags |= KS_DHT_AF_INET6; + } + diag("Adding local bind ipv6 of (%s) %d\n", v6, have_v6); + + err = ks_dht_init(&A_h, af_flags, NULL, A_port); + ok(err == KS_STATUS_SUCCESS); + + if (have_v4) { + err = ks_dht_add_ip(A_h, v4, A_port); + ok(err == KS_STATUS_SUCCESS); + } + + if (have_v6) { + err = ks_dht_add_ip(A_h, v6, A_port); + ok(err == KS_STATUS_SUCCESS); + } + + err = ks_dht_init(&B_h, af_flags, NULL, B_port); + ok(err == KS_STATUS_SUCCESS); + + if (have_v4) { + err = ks_dht_add_ip(B_h, v4, B_port); + ok(err == KS_STATUS_SUCCESS); + } + + if (have_v6) { + err = ks_dht_add_ip(B_h, v6, B_port); + ok(err == KS_STATUS_SUCCESS); + } + + ks_dht_start(A_h); + ks_dht_start(B_h); + + ks_addr_set(&bootstrap[0], v4, B_port, 0); + + /* Have A ping B */ + dht_ping_node(A_h, &bootstrap[0]); + + /* Start test series */ + + /* Absent in Test and Example App */ + /* + This function is called from the test app, with the intent of processing and handling network packets(buf, buflen, from). + Tests for this function should include successful processing of new inbound messages, as well as validation of bad inbound messages. + KS_DECLARE(int) dht_periodic(dht_handle_t *h, const void *buf, size_t buflen, ks_sockaddr_t *from); */ + /* + This function is like the dht_ping_node, except it only adds the node, and waits for dht_periodic to decide when to ping the node. + Doing a node ping first confirms that we have working networking to the new remote node. + KS_DECLARE(int) dht_insert_node(dht_handle_t *h, const unsigned char *id, ks_sockaddr_t *sa); */ + /* + Queries for node stats. Will be used for validating that a node was successfully added. Call before the ping, ping, call after, and compare. + KS_DECLARE(int) dht_nodes(dht_handle_t *h, int af, int *good_return, int *dubious_return, int *cached_return, int *incoming_return); */ + /* + Sets(or changes?) the local DHT listening port. Would be very interesting to see what happens if this is called after nodes are pinged. 
+ KS_DECLARE(void) ks_dht_set_port(dht_handle_t *h, unsigned int port); */ + + + /* Present in Example App but Absent in Test */ + /* + ks_dht_send_message_mutable_cjson(h, alice_secretkey, alice_publickey, NULL, message_id, 1, output, 600); */ + /* + ks_separate_string(cmd_dup, " ", argv, (sizeof(argv) / sizeof(argv[0]))); */ + /* + ks_dht_api_find_node(h, argv[2], argv[3], ipv6); */ + /* + ks_global_set_default_logger(atoi(line + 9)); */ + /* + ks_dht_set_param(h, DHT_PARAM_AUTOROUTE, KS_TRUE); */ + /* + Like dht_periodic, except executes only one loop of work. + ks_dht_one_loop(h, 0); */ + /* + Returns a list of local bindings. Most useful after the DHT_PARAM_AUTOROUTE to return which routes it bound to. + ks_dht_get_bind_addrs(h, &bindings, &len); */ + /* + Callback for different message type actions. Called from the dht_periodic functions. + ks_dht_set_callback(h, callback, NULL); */ + /* + Executes a search for a particular SHA hash. Pings known nodes to see if they have the hash. callback is called with results. + dht_search(h, hash, globals.port, AF_INET, callback, NULL); */ + /* + Print the contents of the 'dht tables' to stdout. Need a version that gets info in a testable format. + dht_dump_tables(h, stdout); */ + /* dht_get_nodes(h, sin, &num, sin6, &num6); */ + /* + Shuts down the DHT handle, and should properly clean up. + dht_uninit(&h); */ + + /* Cleanup and shutdown */ + + todo("ks_dht_stop()"); + todo("ks_dht_destroy()"); + + err = ks_shutdown(); + ok(!err); + + done_testing(); +} diff --git a/libs/libks/test/testhash.c b/libs/libks/test/testhash.c new file mode 100644 index 0000000000..e595a04164 --- /dev/null +++ b/libs/libks/test/testhash.c @@ -0,0 +1,137 @@ +#include "ks.h" +#include "tap.h" + +int test1(void) +{ + ks_pool_t *pool; + ks_hash_t *hash; + int i, sum1 = 0, sum2 = 0; + + ks_pool_open(&pool); + ks_hash_create(&hash, KS_HASH_MODE_DEFAULT, KS_HASH_FREE_BOTH | KS_HASH_FLAG_RWLOCK, pool); + + for (i = 1; i < 1001; i++) { + char *key = ks_pprintf(pool, "KEY %d", i); + char *val = ks_pprintf(pool, "%d", i); + ks_hash_insert(hash, key, val); + sum1 += i; + } + + + + ks_hash_iterator_t *itt; + + ks_hash_write_lock(hash); + for (itt = ks_hash_first(hash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) { + const void *key; + void *val; + + ks_hash_this(itt, &key, NULL, &val); + + printf("%s=%s\n", (char *)key, (char *)val); + sum2 += atoi(val); + + ks_hash_remove(hash, (char *)key); + } + ks_hash_write_unlock(hash); + + ks_hash_destroy(&hash); + + ks_pool_close(&pool); + + return (sum1 == sum2); +} + +#define MAX 100 + +static void *test2_thread(ks_thread_t *thread, void *data) +{ + ks_hash_iterator_t *itt; + ks_hash_t *hash = (ks_hash_t *) data; + + while(thread->running) { + for (itt = ks_hash_first(hash, KS_READLOCKED); itt; itt = ks_hash_next(&itt)) { + const void *key; + void *val; + + ks_hash_this(itt, &key, NULL, &val); + + printf("%p ITT %s=%s\n", (void *)(intptr_t)ks_thread_self(), (char *)key, (char *)val); + } + ks_sleep(100000); + } + + + return NULL; +} + +int test2(void) +{ + ks_thread_t *threads[MAX]; + int ttl = 5; + int runs = 5; + ks_pool_t *pool; + ks_hash_t *hash; + int i; + ks_hash_iterator_t *itt; + + ks_pool_open(&pool); + ks_hash_create(&hash, KS_HASH_MODE_DEFAULT, KS_HASH_FREE_BOTH | KS_HASH_FLAG_RWLOCK, pool); + + for (i = 0; i < ttl; i++) { + ks_thread_create(&threads[i], test2_thread, hash, pool); + } + + for(i = 0; i < runs; i++) { + int x = rand() % 5; + int j; + + for (j = 0; j < 100; j++) { + char *key = ks_pprintf(pool, "KEY %d", j); + 
char *val = ks_pprintf(pool, "%d", j); + ks_hash_insert(hash, key, val); + } + + ks_sleep(x * 1000000); + + ks_hash_write_lock(hash); + for (itt = ks_hash_first(hash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) { + const void *key; + void *val; + + ks_hash_this(itt, &key, NULL, &val); + + printf("DEL %s=%s\n", (char *)key, (char *)val); + ks_hash_remove(hash, (char *)key); + } + ks_hash_write_unlock(hash); + + } + + for (i = 0; i < ttl; i++) { + threads[i]->running = 0; + ks_thread_join(threads[i]); + } + + + ks_hash_destroy(&hash); + ks_pool_close(&pool); + + return 1; +} + +int main(int argc, char **argv) +{ + + ks_init(); + srand((unsigned)(time(NULL) - (unsigned)(intptr_t)ks_thread_self())); + + plan(2); + + ok(test1()); + ok(test2()); + + ks_shutdown(); + + done_testing(); +} diff --git a/libs/libks/test/testhash.vcxproj b/libs/libks/test/testhash.vcxproj new file mode 100644 index 0000000000..c3bf783a68 --- /dev/null +++ b/libs/libks/test/testhash.vcxproj @@ -0,0 +1,170 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {43724CF4-FCE1-44FE-AB36-C86E3979B350} + Win32Proj + testhash + 8.1 + + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. 
+ + + Console + true + true + true + + + + + {70d178d8-1100-4152-86c0-809a91cff832} + + + + + + + + + + \ No newline at end of file diff --git a/libs/libks/test/testpools.c b/libs/libks/test/testpools.c index 48580a0525..b3be4ebfbf 100644 --- a/libs/libks/test/testpools.c +++ b/libs/libks/test/testpools.c @@ -1,15 +1,54 @@ +#include "ks.h" + #include #include -#include "mpool.h" #include +#include "tap.h" + +static void fill(char *str, int bytes, char c) +{ + memset(str, c, bytes -1); + *(str+(bytes-1)) = '\0'; +} + +struct foo { + int x; + char *str; +}; + + +void cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype) +{ + struct foo *foo = (struct foo *) ptr; + + printf("Cleanup %p action: %d\n", ptr, action); + + switch(action) { + case KS_MPCL_ANNOUNCE: + break; + case KS_MPCL_TEARDOWN: + break; + case KS_MPCL_DESTROY: + printf("DESTROY STR [%s]\n", foo->str); + free(foo->str); + foo->str = NULL; + } +} + + int main(int argc, char **argv) { - mpool_t *pool; + ks_pool_t *pool; int err = 0; char *str = NULL; - int x = 0; int bytes = 1024; + ks_status_t status; + struct foo *foo; + + ks_init(); + + plan(11); if (argc > 1) { int tmp = atoi(argv[1]); @@ -22,26 +61,153 @@ int main(int argc, char **argv) } } - pool = mpool_open(MPOOL_FLAG_ANONYMOUS, 0, NULL, &err); + status = ks_pool_open(&pool); - if (!pool || err != MPOOL_ERROR_NONE) { - fprintf(stderr, "ERR: %d [%s]\n", err, mpool_strerror(err)); + printf("OPEN:\n"); + ok(status == KS_STATUS_SUCCESS); + if (status != KS_STATUS_SUCCESS) { + fprintf(stderr, "OPEN ERR: %d [%s]\n", err, ks_pool_strerror(status)); exit(255); } - str = mpool_alloc(pool, bytes, &err); - memset(str+x, '.', bytes -1); - *(str+(bytes-1)) = '\0'; + printf("ALLOC:\n"); + str = ks_pool_alloc(pool, bytes); + ok(str != NULL); + if (!str) { + fprintf(stderr, "ALLOC ERR\n"); + exit(255); + } + + fill(str, bytes, '.'); printf("%s\n", str); - //mpool_clear(pool); - err = mpool_close(pool); - - if (err != MPOOL_ERROR_NONE) { - fprintf(stderr, "ERR: [%s]\n", mpool_strerror(err)); + printf("FREE:\n"); + + status = ks_pool_safe_free(pool, str); + if (status != KS_STATUS_SUCCESS) { + fprintf(stderr, "FREE ERR: [%s]\n", ks_pool_strerror(err)); exit(255); } + + printf("ALLOC2:\n"); + + str = ks_pool_alloc(pool, bytes); + + ok(str != NULL); + if (!str) { + fprintf(stderr, "ALLOC2 ERR: [FAILED]\n"); + exit(255); + } + + fill(str, bytes, '-'); + printf("%s\n", str); + + + printf("ALLOC OBJ:\n"); + + foo = ks_pool_alloc(pool, sizeof(struct foo)); + + ok(foo != NULL); + if (!foo) { + fprintf(stderr, "ALLOC OBJ: [FAILED]\n"); + exit(255); + } else { + printf("ALLOC OBJ [%p]:\n", (void *) foo); + } + + foo->x = 12; + foo->str = strdup("This is a test 1234 abcd; This will be called on explicit free\n"); + ks_pool_set_cleanup(pool, foo, NULL, 0, cleanup); + + printf("FREE OBJ:\n"); + + status = ks_pool_safe_free(pool, foo); + ok(status == KS_STATUS_SUCCESS); + if (status != KS_STATUS_SUCCESS) { + fprintf(stderr, "FREE OBJ ERR: [%s]\n", ks_pool_strerror(status)); + exit(255); + } + + + printf("ALLOC OBJ2:\n"); + + foo = ks_pool_alloc(pool, sizeof(struct foo)); + + ok(foo != NULL); + if (!foo) { + fprintf(stderr, "ALLOC OBJ2: [FAILED]\n"); + exit(255); + } else { + printf("ALLOC OBJ2 [%p]:\n", (void *) foo); + } + + foo->x = 12; + foo->str = strdup("This is a second test 1234 abcd; This will be called on pool clear/destroy\n"); + ks_pool_set_cleanup(pool, foo, NULL, 0, cleanup); + + + printf("ALLOC OBJ3:\n"); + + foo = 
ks_pool_alloc(pool, sizeof(struct foo)); + + ok(foo != NULL); + if (!foo) { + fprintf(stderr, "ALLOC OBJ3: [FAILED]\n"); + exit(255); + } else { + printf("ALLOC OBJ3 [%p]:\n", (void *) foo); + } + + foo->x = 12; + foo->str = strdup("This is a third test 1234 abcd; This will be called on pool clear/destroy\n"); + ks_pool_set_cleanup(pool, foo, NULL, 0, cleanup); + + + + printf("RESIZE:\n"); + bytes *= 2; + str = ks_pool_resize(pool, str, bytes); + + ok(str != NULL); + if (!str) { + fprintf(stderr, "RESIZE ERR: [FAILED]\n"); + exit(255); + } + + fill(str, bytes, '*'); + printf("%s\n", str); + + + printf("FREE 2:\n"); + + status = ks_pool_free(pool, str); + ok(status == KS_STATUS_SUCCESS); + if (status != KS_STATUS_SUCCESS) { + fprintf(stderr, "FREE2 ERR: [%s]\n", ks_pool_strerror(status)); + exit(255); + } + + + printf("CLEAR:\n"); + status = ks_pool_clear(pool); + + ok(status == KS_STATUS_SUCCESS); + if (status != KS_STATUS_SUCCESS) { + fprintf(stderr, "CLEAR ERR: [%s]\n", ks_pool_strerror(status)); + exit(255); + } + + printf("CLOSE:\n"); + status = ks_pool_close(&pool); - exit(0); + ok(status == KS_STATUS_SUCCESS); + if (status != KS_STATUS_SUCCESS) { + fprintf(stderr, "CLOSE ERR: [%s]\n", ks_pool_strerror(err)); + exit(255); + } + + ks_shutdown(); + + done_testing(); } diff --git a/libs/libks/test/testpools.vcxproj b/libs/libks/test/testpools.vcxproj new file mode 100644 index 0000000000..76a40e4c16 --- /dev/null +++ b/libs/libks/test/testpools.vcxproj @@ -0,0 +1,170 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {5825A3B2-31A0-475A-AF32-44FB0D8B52D4} + Win32Proj + testpools + 8.1 + + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. 
+ + + Console + true + true + true + + + + + {70d178d8-1100-4152-86c0-809a91cff832} + + + + + + + + + + \ No newline at end of file diff --git a/libs/libks/test/testq.c b/libs/libks/test/testq.c new file mode 100644 index 0000000000..90f129755a --- /dev/null +++ b/libs/libks/test/testq.c @@ -0,0 +1,218 @@ +#include "ks.h" +#include "tap.h" +#define MAX 200 + +static void *test1_thread(ks_thread_t *thread, void *data) +{ + ks_q_t *q = (ks_q_t *) data; + void *pop; + + while(ks_q_pop(q, &pop) == KS_STATUS_SUCCESS) { + //int *i = (int *)pop; + //printf("POP %d\n", *i); + ks_pool_free(thread->pool, pop); + } + + return NULL; +} + +static void do_flush(ks_q_t *q, void *ptr, void *flush_data) +{ + ks_pool_t *pool = (ks_pool_t *)flush_data; + ks_pool_free(pool, ptr); + +} + +int qtest1(int loops) +{ + ks_thread_t *thread; + ks_q_t *q; + ks_pool_t *pool; + int i; + + ks_pool_open(&pool); + ks_q_create(&q, pool, loops); + + ks_thread_create(&thread, test1_thread, q, pool); + + for (i = 0; i < 10000; i++) { + int *val = (int *)ks_pool_alloc(pool, sizeof(int)); + *val = i; + ks_q_push(q, val); + } + + ks_q_wait(q); + + ks_q_term(q); + ks_thread_join(thread); + + ks_q_destroy(&q); + + ks_q_create(&q, pool, loops); + ks_q_set_flush_fn(q, do_flush, pool); + + for (i = 0; i < loops; i++) { + int *val = (int *)ks_pool_alloc(pool, sizeof(int)); + *val = i; + ks_q_push(q, val); + } + + ks_q_destroy(&q); + + ks_pool_close(&pool); + + return 1; + +} + +struct test2_data { + ks_q_t *q; + int try; + int ready; + int running; +}; + +static void *test2_thread(ks_thread_t *thread, void *data) +{ + struct test2_data *t2 = (struct test2_data *) data; + void *pop; + ks_status_t status; + int popped = 0; + + while (t2->running && (t2->try && !t2->ready)) { + ks_sleep(10000); + continue; + } + + while (t2->running || ks_q_size(t2->q)) { + if (t2->try) { + status = ks_q_trypop(t2->q, &pop); + } else { + status = ks_q_pop(t2->q, &pop); + } + + if (status == KS_STATUS_SUCCESS) { + //int *i = (int *)pop; + //printf("%p POP %d\n", (void *)pthread_self(), *i); + popped++; + ks_pool_free(thread->pool, pop); + } else if (status == KS_STATUS_INACTIVE) { + break; + } else if (t2->try && ks_q_size(t2->q)) { + int s = rand() % 100; + ks_sleep(s * 1000); + } + } + + return (void *) (intptr_t)popped; +} + +ks_size_t qtest2(int ttl, int try, int loops) +{ + ks_thread_t *threads[MAX]; + ks_q_t *q; + ks_pool_t *pool; + int i; + struct test2_data t2 = { 0 }; + ks_size_t r; + int dropped = 0; + int qlen = loops / 2; + int total_popped = 0; + + ks_pool_open(&pool); + ks_q_create(&q, pool, qlen); + + t2.q = q; + t2.try = try; + t2.running = 1; + + for (i = 0; i < ttl; i++) { + ks_thread_create(&threads[i], test2_thread, &t2, pool); + } + + //ks_sleep(loops00); + + for (i = 0; i < loops; i++) { + int *val = (int *)ks_pool_alloc(pool, sizeof(int)); + *val = i; + if (try > 1) { + if (ks_q_trypush(q, val) != KS_STATUS_SUCCESS) { + dropped++; + } + } else { + ks_q_push(q, val); + } + if (i > qlen / 2) { + t2.ready = 1; + } + } + + t2.running = 0; + + if (!try) { + ks_q_wait(q); + ks_q_term(q); + } + + for (i = 0; i < ttl; i++) { + int popped; + ks_thread_join(threads[i]); + popped = (int)(intptr_t)threads[i]->return_data; + if (popped) { + printf("%d/%d POPPED %d\n", i, ttl, popped); + } + total_popped += popped; + } + + r = ks_q_size(q); + ks_assert(r == 0); + + ks_q_destroy(&q); + + + + printf("TOTAL POPPED: %d DROPPED %d SUM: %d\n", total_popped, dropped, total_popped + dropped);fflush(stdout); + + if (try < 2) { + ks_assert(total_popped == 
loops); + } else { + ks_assert(total_popped + dropped == loops); + } + + ks_pool_close(&pool); + + return r; + +} + + +int main(int argc, char **argv) +{ + int ttl; + int size = 100000; + int runs = 1; + int i; + + ks_init(); + + srand((unsigned)(time(NULL) - (unsigned)(intptr_t)ks_thread_self())); + + plan(4 * runs); + + ttl = ks_cpu_count() * 5; + //ttl = 5; + + + for(i = 0; i < runs; i++) { + ok(qtest1(size)); + ok(qtest2(ttl, 0, size) == 0); + ok(qtest2(ttl, 1, size) == 0); + ok(qtest2(ttl, 2, size) == 0); + } + + printf("TTL %d RUNS %d\n", ttl, runs); + + ks_shutdown(); + + done_testing(); +} diff --git a/libs/libks/test/testq.vcxproj b/libs/libks/test/testq.vcxproj new file mode 100644 index 0000000000..8bc112dec0 --- /dev/null +++ b/libs/libks/test/testq.vcxproj @@ -0,0 +1,170 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {3F8E0DF3-F402-40E0-8D78-44A094625D25} + Win32Proj + testq + 8.1 + + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. 
+ + + Console + true + true + true + + + + + {70d178d8-1100-4152-86c0-809a91cff832} + + + + + + + + + + \ No newline at end of file diff --git a/libs/libks/test/testsock.c b/libs/libks/test/testsock.c new file mode 100644 index 0000000000..f825768b70 --- /dev/null +++ b/libs/libks/test/testsock.c @@ -0,0 +1,406 @@ +#include +#include + +static char v4[48] = ""; +static char v6[48] = ""; +static int mask = 0; +static int tcp_port = 8090; +static int udp_cl_port = 9090; +static int udp_sv_port = 9091; + +static char __MSG[] = "TESTING................................................................................/TESTING"; + +struct tcp_data { + ks_socket_t sock; + ks_sockaddr_t addr; + int ready; + char *ip; +}; + +void server_callback(ks_socket_t server_sock, ks_socket_t client_sock, ks_sockaddr_t *addr, void *user_data) +{ + //struct tcp_data *tcp_data = (struct tcp_data *) user_data; + char buf[8192] = ""; + ks_status_t status; + ks_size_t bytes; + + printf("TCP SERVER SOCK %d connection from %s:%u\n", (int)server_sock, addr->host, addr->port); + + do { + bytes = sizeof(buf);; + status = ks_socket_recv(client_sock, buf, &bytes); + if (status != KS_STATUS_SUCCESS) { + printf("TCP SERVER BAIL %s\n", strerror(ks_errno())); + break; + } + printf("TCP SERVER READ %ld bytes [%s]\n", (long)bytes, buf); + } while(zstr_buf(buf) || strcmp(buf, __MSG)); + + bytes = strlen(buf); + status = ks_socket_send(client_sock, buf, &bytes); + printf("TCP SERVER WRITE %ld bytes\n", (long)bytes); + + ks_socket_close(&client_sock); + + printf("TCP SERVER COMPLETE\n"); +} + + + +static void *tcp_sock_server(ks_thread_t *thread, void *thread_data) +{ + struct tcp_data *tcp_data = (struct tcp_data *) thread_data; + + tcp_data->ready = 1; + ks_listen_sock(tcp_data->sock, &tcp_data->addr, 0, server_callback, tcp_data); + + printf("TCP THREAD DONE\n"); + + return NULL; +} + +static int test_addr(int v) +{ + ks_sockaddr_t addr1, addr2, addr3, addr4, addr5; + + printf("TESTING ADDR v%d\n", v); + + if (v == 4) { + if (ks_addr_set(&addr1, "10.100.200.5", 2467, AF_INET) != KS_STATUS_SUCCESS) { + return 0; + } + + if (strcmp(addr1.host, "10.100.200.5")) { + return 0; + } + + if (ks_addr_set(&addr2, "10.100.200.5", 2467, AF_INET) != KS_STATUS_SUCCESS) { + return 0; + } + + if (ks_addr_set(&addr3, "10.100.200.5", 1234, AF_INET) != KS_STATUS_SUCCESS) { + return 0; + } + + if (ks_addr_set(&addr4, "10.199.200.5", 2467, AF_INET) != KS_STATUS_SUCCESS) { + return 0; + } + + } else { + if (ks_addr_set(&addr1, "1607:f418:1210::1", 2467, AF_INET6) != KS_STATUS_SUCCESS) { + return 0; + } + + if (strcmp(addr1.host, "1607:f418:1210::1")) { + return 0; + } + + if (ks_addr_set(&addr2, "1607:f418:1210::1", 2467, AF_INET6) != KS_STATUS_SUCCESS) { + return 0; + } + + if (ks_addr_set(&addr3, "1607:f418:1210::1", 1234, AF_INET6) != KS_STATUS_SUCCESS) { + return 0; + } + + if (ks_addr_set(&addr4, "1337:a118:1306::1", 2467, AF_INET6) != KS_STATUS_SUCCESS) { + return 0; + } + } + + + if (ks_addr_copy(&addr5, &addr4) != KS_STATUS_SUCCESS) { + return 0; + } + + if (!ks_addr_cmp(&addr1, &addr2)) { + return 0; + } + + if (ks_addr_cmp(&addr1, &addr3)) { + return 0; + } + + if (ks_addr_cmp(&addr1, &addr4)) { + return 0; + } + + if (!ks_addr_cmp(&addr4, &addr5)) { + return 0; + } + + if (ks_addr_cmp(&addr1, &addr5)) { + return 0; + } + + + return 1; +} + +static int test_tcp(char *ip) +{ + ks_thread_t *thread_p = NULL; + ks_pool_t *pool; + ks_sockaddr_t addr; + int family = AF_INET; + ks_socket_t cl_sock = KS_SOCK_INVALID; + char buf[8192] = ""; + struct 
tcp_data tcp_data = { 0 }; + int r = 1, sanity = 100; + + ks_pool_open(&pool); + + if (strchr(ip, ':')) { + family = AF_INET6; + } + + if (ks_addr_set(&tcp_data.addr, ip, tcp_port, family) != KS_STATUS_SUCCESS) { + r = 0; + printf("TCP CLIENT Can't set ADDR\n"); + goto end; + } + + if ((tcp_data.sock = socket(family, SOCK_STREAM, IPPROTO_TCP)) == KS_SOCK_INVALID) { + r = 0; + printf("TCP CLIENT Can't create sock family %d\n", family); + goto end; + } + + ks_socket_option(tcp_data.sock, SO_REUSEADDR, KS_TRUE); + ks_socket_option(tcp_data.sock, TCP_NODELAY, KS_TRUE); + + tcp_data.ip = ip; + + ks_thread_create(&thread_p, tcp_sock_server, &tcp_data, pool); + + while(!tcp_data.ready && --sanity > 0) { + ks_sleep(10000); + } + + ks_addr_set(&addr, ip, tcp_port, family); + cl_sock = ks_socket_connect(SOCK_STREAM, IPPROTO_TCP, &addr); + + int x; + + printf("TCP CLIENT SOCKET %d %s %d\n", (int)cl_sock, addr.host, addr.port); + + x = write((int)cl_sock, __MSG, (unsigned)strlen(__MSG)); + printf("TCP CLIENT WRITE %d bytes\n", x); + + x = read((int)cl_sock, buf, sizeof(buf)); + printf("TCP CLIENT READ %d bytes [%s]\n", x, buf); + + end: + + if (tcp_data.sock != KS_SOCK_INVALID) { + ks_socket_shutdown(tcp_data.sock, 2); + ks_socket_close(&tcp_data.sock); + } + + if (thread_p) { + ks_thread_join(thread_p); + } + + ks_socket_close(&cl_sock); + + ks_pool_close(&pool); + + return r; +} + + +struct udp_data { + int ready; + char *ip; + ks_socket_t sv_sock; +}; + +static void *udp_sock_server(ks_thread_t *thread, void *thread_data) +{ + struct udp_data *udp_data = (struct udp_data *) thread_data; + int family = AF_INET; + ks_status_t status; + ks_sockaddr_t addr, remote_addr = KS_SA_INIT; + char buf[8192] = ""; + ks_size_t bytes; + + udp_data->sv_sock = KS_SOCK_INVALID; + + if (strchr(udp_data->ip, ':')) { + family = AF_INET6; + } + + ks_addr_set(&addr, udp_data->ip, udp_sv_port, family); + remote_addr.family = family; + + if ((udp_data->sv_sock = socket(family, SOCK_DGRAM, IPPROTO_UDP)) == KS_SOCK_INVALID) { + printf("UDP SERVER SOCKET ERROR %s\n", strerror(ks_errno())); + goto end; + } + + ks_socket_option(udp_data->sv_sock, SO_REUSEADDR, KS_TRUE); + + if (ks_addr_bind(udp_data->sv_sock, &addr) != KS_STATUS_SUCCESS) { + printf("UDP SERVER BIND ERROR %s\n", strerror(ks_errno())); + goto end; + } + + udp_data->ready = 1; + + printf("UDP SERVER SOCKET %d %s %d\n", (int)(udp_data->sv_sock), addr.host, addr.port); + bytes = sizeof(buf); + if ((status = ks_socket_recvfrom(udp_data->sv_sock, buf, &bytes, &remote_addr)) != KS_STATUS_SUCCESS) { + printf("UDP SERVER RECVFROM ERR %s\n", strerror(ks_errno())); + goto end; + } + printf("UDP SERVER READ %ld bytes [%s]\n", (long)bytes, buf); + + if (strcmp(buf, __MSG)) { + printf("INVALID MESSAGE\n"); + goto end; + } + + printf("UDP SERVER WAIT 2 seconds to test nonblocking sockets\n"); + ks_sleep(2000000); + printf("UDP SERVER RESPOND TO %d %s %d\n", (int)(udp_data->sv_sock), remote_addr.host, remote_addr.port); + bytes = strlen(buf); + if ((status = ks_socket_sendto(udp_data->sv_sock, buf, &bytes, &remote_addr)) != KS_STATUS_SUCCESS) { + printf("UDP SERVER SENDTO ERR %s\n", strerror(ks_errno())); + goto end; + } + printf("UDP SERVER WRITE %ld bytes [%s]\n", (long)bytes, buf); + + + end: + + udp_data->ready = -1; + printf("UDP THREAD DONE\n"); + + ks_socket_close(&udp_data->sv_sock); + + return NULL; +} + + +static int test_udp(char *ip) +{ + ks_thread_t *thread_p = NULL; + ks_pool_t *pool; + ks_sockaddr_t addr, remote_addr; + int family = AF_INET; + ks_socket_t cl_sock 
= KS_SOCK_INVALID; + char buf[8192] = ""; + int r = 1, sanity = 100; + struct udp_data udp_data = { 0 }; + ks_size_t bytes = 0; + ks_status_t status; + + ks_pool_open(&pool); + + if (strchr(ip, ':')) { + family = AF_INET6; + } + + ks_addr_set(&addr, ip, udp_cl_port, family); + + if ((cl_sock = socket(family, SOCK_DGRAM, IPPROTO_UDP)) == KS_SOCK_INVALID) { + printf("UDP CLIENT SOCKET ERROR %s\n", strerror(ks_errno())); + r = 0; goto end; + } + + ks_socket_option(cl_sock, SO_REUSEADDR, KS_TRUE); + + if (ks_addr_bind(cl_sock, &addr) != KS_STATUS_SUCCESS) { + printf("UDP CLIENT BIND ERROR %s\n", strerror(ks_errno())); + r = 0; goto end; + } + + ks_addr_set(&remote_addr, ip, udp_sv_port, family); + + udp_data.ip = ip; + ks_thread_create(&thread_p, udp_sock_server, &udp_data, pool); + + while(!udp_data.ready && --sanity > 0) { + ks_sleep(10000); + } + + printf("UDP CLIENT SOCKET %d %s %d -> %s %d\n", (int)cl_sock, addr.host, addr.port, remote_addr.host, remote_addr.port); + + bytes = strlen(__MSG); + if ((status = ks_socket_sendto(cl_sock, __MSG, &bytes, &remote_addr)) != KS_STATUS_SUCCESS) { + printf("UDP CLIENT SENDTO ERR %s\n", strerror(ks_errno())); + r = 0; goto end; + } + + printf("UDP CLIENT WRITE %ld bytes\n", (long)bytes); + ks_socket_option(cl_sock, KS_SO_NONBLOCK, KS_TRUE); + + sanity = 300; + do { + status = ks_socket_recvfrom(cl_sock, buf, &bytes, &remote_addr); + + if (status == KS_STATUS_BREAK && --sanity > 0) { + if ((sanity % 50) == 0) printf("UDP CLIENT SLEEP NONBLOCKING\n"); + ks_sleep(10000); + } else if (status != KS_STATUS_SUCCESS) { + printf("UDP CLIENT RECVFROM ERR %s\n", strerror(ks_errno())); + r = 0; goto end; + } + } while(status != KS_STATUS_SUCCESS); + printf("UDP CLIENT READ %ld bytes\n", (long)bytes); + + end: + + if (thread_p) { + ks_thread_join(thread_p); + } + + if (udp_data.ready > 0 && udp_data.sv_sock && ks_socket_valid(udp_data.sv_sock)) { + ks_socket_shutdown(udp_data.sv_sock, 2); + ks_socket_close(&udp_data.sv_sock); + } + + + + ks_socket_close(&cl_sock); + + ks_pool_close(&pool); + + return r; +} + + +int main(void) +{ + int have_v4 = 0, have_v6 = 0; + + ks_init(); + + ks_find_local_ip(v4, sizeof(v4), &mask, AF_INET, NULL); + ks_find_local_ip(v6, sizeof(v6), NULL, AF_INET6, NULL); + + printf("IPS: v4: [%s] v6: [%s]\n", v4, v6); + + have_v4 = zstr_buf(v4) ? 0 : 1; + have_v6 = zstr_buf(v6) ? 
0 : 1; + + plan((have_v4 * 3) + (have_v6 * 3) + 1); + + ok(have_v4 || have_v6); + + if (have_v4) { + ok(test_tcp(v4)); + ok(test_udp(v4)); + ok(test_addr(4)); + } + + if (have_v6) { + ok(test_tcp(v6)); + ok(test_udp(v6)); + ok(test_addr(6)); + } + + ks_shutdown(); + + done_testing(); +} diff --git a/libs/libks/test/testsock.vcxproj b/libs/libks/test/testsock.vcxproj new file mode 100644 index 0000000000..b7eefcdeba --- /dev/null +++ b/libs/libks/test/testsock.vcxproj @@ -0,0 +1,170 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {5DC38E2B-0512-4140-8A1B-59952A5DC9CB} + Win32Proj + testsock + 8.1 + + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. 
+ + + Console + true + true + true + + + + + {70d178d8-1100-4152-86c0-809a91cff832} + + + + + + + + + + \ No newline at end of file diff --git a/libs/libks/test/testsock2.c b/libs/libks/test/testsock2.c new file mode 100644 index 0000000000..e07eb28d7e --- /dev/null +++ b/libs/libks/test/testsock2.c @@ -0,0 +1,20 @@ +#include +#include + +int main(int argc, char *argv[]) +{ + char ip[80] = ""; + + ks_init(); + + if (argc > 1) { + ks_ip_route(ip, sizeof(ip), argv[1]); + printf("IPS [%s]\n", ip); + } else { + fprintf(stderr, "Missing arg \n"); + } + + ks_shutdown(); + + done_testing(); +} diff --git a/libs/libks/test/testthreadmutex.c b/libs/libks/test/testthreadmutex.c new file mode 100644 index 0000000000..38e697f16b --- /dev/null +++ b/libs/libks/test/testthreadmutex.c @@ -0,0 +1,351 @@ +#include "ks.h" +#include "tap.h" +#define MAX_STUFF 200 + +static ks_thread_t *threads[MAX_STUFF]; +static ks_thread_t *thread_p; + +static ks_pool_t *pool; +static ks_mutex_t *mutex; +static ks_mutex_t *mutex_non_recursive; +static ks_rwl_t *rwlock; +static ks_cond_t *cond; +static int counter1 = 0; +static int counter2 = 0; +static int counter3 = 0; +static int counter4 = 0; +static int counter5 = 0; +static int counter6 = 0; +static int threadscount = 0; +static int cpu_count = 0; + +#define LOOP_COUNT 10000 + +static void *thread_priority(ks_thread_t *thread, void *data) +{ + while (thread->running) { + ks_sleep(1000000); + } + + return NULL; +} + +static void *thread_test_cond_producer_func(ks_thread_t *thread, void *data) +{ + for (;;) { + ks_cond_lock(cond); + if (counter5 >= LOOP_COUNT) { + ks_cond_unlock(cond); + break; + } + counter5++; + if (counter6 == 0) { + ks_cond_signal(cond); + } + counter6++; + ks_cond_unlock(cond); + *((int *) data) += 1; + } + + return NULL; +} + +static void *thread_test_cond_consumer_func(ks_thread_t *thread, void *data) +{ + int i; + + for (i = 0; i < LOOP_COUNT; i++) { + ks_cond_lock(cond); + while (counter6 == 0) { + ks_cond_wait(cond); + } + counter6--; + ks_cond_unlock(cond); + } + return NULL; +} + +static void check_cond(void) +{ + int count[MAX_STUFF] = { 0 }; + int ttl = 0; + + ok( (ks_pool_open(&pool) == KS_STATUS_SUCCESS) ); + ok( (ks_cond_create(&cond, pool) == KS_STATUS_SUCCESS) ); + + int i; + for(i = 0; i < cpu_count; i++) { + ok( (ks_thread_create(&threads[i], thread_test_cond_producer_func, &count[i], pool) == KS_STATUS_SUCCESS) ); + } + ok( (ks_thread_create(&thread_p, thread_test_cond_consumer_func, NULL, pool) == KS_STATUS_SUCCESS) ); + ok( (ks_pool_close(&pool) == KS_STATUS_SUCCESS) ); + for(i = 0; i < cpu_count; i++) { + ttl += count[i]; + } + + ok( (ttl == LOOP_COUNT) ); +} + + +static void *thread_test_rwlock_func(ks_thread_t *thread, void *data) +{ + int loop = 1; + + while (1) + { + ks_rwl_read_lock(rwlock); + if (counter4 == LOOP_COUNT) { + loop = 0; + } + ks_rwl_read_unlock(rwlock); + + if (!loop) { + break; + } + + ks_rwl_write_lock(rwlock); + if (counter4 != LOOP_COUNT) { + counter4++; + } + ks_rwl_write_unlock(rwlock); + } + return NULL; +} + +static void check_rwl(void) +{ + ks_status_t status; + + ok( (ks_pool_open(&pool) == KS_STATUS_SUCCESS) ); + ok( (ks_rwl_create(&rwlock, pool) == KS_STATUS_SUCCESS) ); + ks_rwl_read_lock(rwlock); + status = ks_rwl_try_read_lock(rwlock); + ok( status == KS_STATUS_SUCCESS ); + if ( status == KS_STATUS_SUCCESS ) { + ks_rwl_read_unlock(rwlock); + } + ks_rwl_read_unlock(rwlock); + + int i; + for(i = 0; i < cpu_count; i++) { + ok( (ks_thread_create(&threads[i], thread_test_rwlock_func, NULL, pool) == 
KS_STATUS_SUCCESS) ); + } + + + for(i = 0; i < cpu_count; i++) { + ks_thread_join(threads[i]); + } + + ok( (ks_pool_close(&pool) == KS_STATUS_SUCCESS) ); + ok( (counter4 == LOOP_COUNT) ); + +} + +static void *thread_test_function_cleanup(ks_thread_t *thread, void *data) +{ + int d = (int)(intptr_t)data; + + while (thread->running) { + ks_sleep(1000000); + } + + if ( d == 1 ) { + ks_mutex_lock(mutex); + counter3++; + ks_mutex_unlock(mutex); + } + + return NULL; +} + +static void *thread_test_function_detatched(ks_thread_t *thread, void *data) +{ + int i; + int d = (int)(intptr_t)data; + + for (i = 0; i < LOOP_COUNT; i++) { + ks_mutex_lock(mutex); + if (d == 1) { + counter2++; + } + ks_mutex_unlock(mutex); + } + ks_mutex_lock(mutex); + threadscount++; + ks_mutex_unlock(mutex); + + return NULL; +} + +static void *thread_test_function_atatched(ks_thread_t *thread, void *data) +{ + int i; + int d = (int)(intptr_t)data; + void *mem, *last_mem = NULL; + + for (i = 0; i < LOOP_COUNT; i++) { + if (last_mem) { + ks_pool_safe_free(thread->pool, last_mem); + } + mem = ks_pool_alloc(thread->pool, 1024); + last_mem = mem; + } + + for (i = 0; i < LOOP_COUNT; i++) { + ks_mutex_lock(mutex); + if (d == 1) { + counter1++; + } + ks_mutex_unlock(mutex); + } + + return NULL; +} + +static void create_threads_cleanup(void) +{ + void *d = (void *)(intptr_t)1; + int i; + for(i = 0; i < cpu_count; i++) { + ok( (ks_thread_create(&threads[i], thread_test_function_cleanup, d, pool) == KS_STATUS_SUCCESS) ); + } + +} + +static void create_threads_atatched(void) +{ + void *d = (void *)(intptr_t)1; + + int i; + for(i = 0; i < cpu_count; i++) { + ok( (ks_thread_create(&threads[i], thread_test_function_atatched, d, pool) == KS_STATUS_SUCCESS) ); + } +} + +static void create_threads_detatched(void) +{ + ks_status_t status; + void *d = (void *)(intptr_t)1; + + int i; + for(i = 0; i < cpu_count; i++) { + status = ks_thread_create_ex(&threads[i], thread_test_function_detatched, d, KS_THREAD_FLAG_DETATCHED, KS_THREAD_DEFAULT_STACK, KS_PRI_NORMAL, pool); + ok( status == KS_STATUS_SUCCESS ); + } +} + +static void check_thread_priority(void) +{ + ks_status_t status; + void *d = (void *)(intptr_t)1; + + status = ks_thread_create_ex(&thread_p, thread_priority, d, KS_THREAD_FLAG_DETATCHED, KS_THREAD_DEFAULT_STACK, KS_PRI_IMPORTANT, pool); + ok( status == KS_STATUS_SUCCESS ); + ks_sleep(1000000); + todo("Add check to see if has permission to set thread priority\n"); + ok( ks_thread_priority(thread_p) == KS_PRI_IMPORTANT ); + end_todo; + + ks_pool_free(pool, thread_p); +} + +static void join_threads(void) +{ + int i; + for(i = 0; i < cpu_count; i++) { + ok( (KS_STATUS_SUCCESS == ks_thread_join(threads[i])) ); + } +} + +static void check_atatched(void) +{ + ok( counter1 == (LOOP_COUNT * cpu_count) ); +} + +static void check_detached(void) +{ + ok( counter2 == (LOOP_COUNT * cpu_count) ); +} + +static void create_pool(void) +{ + ok( (ks_pool_open(&pool) == KS_STATUS_SUCCESS) ); +} + +static void check_cleanup(void) +{ + ok( (counter3 == cpu_count) ); +} + +static void check_pool_close(void) +{ + ok( (ks_pool_close(&pool) == KS_STATUS_SUCCESS) ); +} + +static void create_mutex(void) +{ + ok( (ks_mutex_create(&mutex, KS_MUTEX_FLAG_DEFAULT, pool) == KS_STATUS_SUCCESS) ); +} + +static void create_mutex_non_recursive(void) +{ + ok( (ks_mutex_create(&mutex_non_recursive, KS_MUTEX_FLAG_NON_RECURSIVE, pool) == KS_STATUS_SUCCESS) ); +} + +static void test_recursive_mutex(void) +{ + ks_status_t status; + + ks_mutex_lock(mutex); + status = 
ks_mutex_trylock(mutex); + if (status == KS_STATUS_SUCCESS) { + ks_mutex_unlock(mutex); + } + ok(status == KS_STATUS_SUCCESS); + ks_mutex_unlock(mutex); +} + +static void test_non_recursive_mutex(void) +{ + ks_status_t status; + ks_mutex_lock(mutex_non_recursive); + status = ks_mutex_trylock(mutex_non_recursive); + if (status == KS_STATUS_SUCCESS) { + ks_mutex_unlock(mutex_non_recursive); + } + ok(status != KS_STATUS_SUCCESS); + ks_mutex_unlock(mutex_non_recursive); +} + + +int main(int argc, char **argv) +{ + ks_init(); + cpu_count = ks_cpu_count() * 4; + + plan(21 + cpu_count * 6); + + + diag("Starting testing for %d tests\n", 44); + + create_pool(); + create_mutex(); + create_mutex_non_recursive(); + test_recursive_mutex(); + test_non_recursive_mutex(); + check_thread_priority(); + create_threads_atatched(); + join_threads(); + check_atatched(); + create_threads_detatched(); + while (threadscount != cpu_count) ks_sleep(1000000); + check_detached(); + create_threads_cleanup(); + check_pool_close(); + check_cleanup(); + check_rwl(); + check_cond(); + + ks_shutdown(); + done_testing(); +} diff --git a/libs/libks/test/testthreadmutex.vcxproj b/libs/libks/test/testthreadmutex.vcxproj new file mode 100644 index 0000000000..a2b548c104 --- /dev/null +++ b/libs/libks/test/testthreadmutex.vcxproj @@ -0,0 +1,170 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {AE572500-7266-4692-ACA4-5E37B7B4409A} + Win32Proj + testthreadmutex + 8.1 + + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. 
+ + + Console + true + true + true + + + + + {70d178d8-1100-4152-86c0-809a91cff832} + + + + + + + + + + \ No newline at end of file diff --git a/libs/libks/test/testtime.c b/libs/libks/test/testtime.c new file mode 100644 index 0000000000..0b3a3c0467 --- /dev/null +++ b/libs/libks/test/testtime.c @@ -0,0 +1,45 @@ +#include "ks.h" +#include "tap.h" + +int main(int argc, char **argv) +{ + int64_t now, then; + int diff; + int i; + + ks_init(); + + plan(2); + + then = ks_time_now(); + + ks_sleep(2000000); + + now = ks_time_now(); + + diff = (int)((now - then) / 1000); + printf("DIFF %ums\n", diff); + + + ok( diff > 1990 && diff < 2010 ); + + then = ks_time_now(); + + for (i = 0; i < 100; i++) { + ks_sleep(20000); + } + + now = ks_time_now(); + + diff = (int)((now - then) / 1000); + printf("DIFF %ums\n", diff); + +#if defined(__APPLE__) + /* the clock on osx seems to be particularly bad at being accurate, we need a bit more room for error*/ + ok( diff > 1900 && diff < 2100 ); +#else + ok( diff > 1950 && diff < 2050 ); +#endif + ks_shutdown(); + done_testing(); +} diff --git a/libs/libks/test/testtime.vcxproj b/libs/libks/test/testtime.vcxproj new file mode 100644 index 0000000000..c667f35f7d --- /dev/null +++ b/libs/libks/test/testtime.vcxproj @@ -0,0 +1,170 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {B74812A1-C67D-4568-AF84-26CE2004D8BF} + Win32Proj + testtime + 8.1 + + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + Application + true + v140_xp + Unicode + + + Application + false + v140_xp + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(Platform)\$(Configuration)\$(ProjectName)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + $(SolutionDir)$(Platform)\$(Configuration)\ + + + false + $(Platform)\$(Configuration)\$(ProjectName)\ + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + + + + + + + Level3 + Disabled + _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + Debug + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + ../src/include;. 
+ + + Console + true + true + true + + + + + {70d178d8-1100-4152-86c0-809a91cff832} + + + + + + + + + + \ No newline at end of file diff --git a/libs/libks/test/testwebsock.c b/libs/libks/test/testwebsock.c new file mode 100644 index 0000000000..da57737399 --- /dev/null +++ b/libs/libks/test/testwebsock.c @@ -0,0 +1,284 @@ +#include +#include + + +static char v4[48] = ""; +static char v6[48] = ""; +static int mask = 0; +static int tcp_port = 8090; + + +static char __MSG[] = "TESTING................................................................................/TESTING"; + + +typedef struct ssl_profile_s { + const SSL_METHOD *ssl_method; + SSL_CTX *ssl_ctx; + char cert[512]; + char key[512]; + char chain[512]; +} ssl_profile_t; + +static int init_ssl(ssl_profile_t *profile) +{ + const char *err = ""; + + profile->ssl_ctx = SSL_CTX_new(profile->ssl_method); /* create context */ + assert(profile->ssl_ctx); + + /* Disable SSLv2 */ + SSL_CTX_set_options(profile->ssl_ctx, SSL_OP_NO_SSLv2); + /* Disable SSLv3 */ + SSL_CTX_set_options(profile->ssl_ctx, SSL_OP_NO_SSLv3); + /* Disable TLSv1 */ + SSL_CTX_set_options(profile->ssl_ctx, SSL_OP_NO_TLSv1); + /* Disable Compression CRIME (Compression Ratio Info-leak Made Easy) */ + SSL_CTX_set_options(profile->ssl_ctx, SSL_OP_NO_COMPRESSION); + + /* set the local certificate from CertFile */ + if (!zstr(profile->chain)) { + if (!SSL_CTX_use_certificate_chain_file(profile->ssl_ctx, profile->chain)) { + err = "CERT CHAIN FILE ERROR"; + goto fail; + } + } + + if (!SSL_CTX_use_certificate_file(profile->ssl_ctx, profile->cert, SSL_FILETYPE_PEM)) { + err = "CERT FILE ERROR"; + goto fail; + } + + /* set the private key from KeyFile */ + + if (!SSL_CTX_use_PrivateKey_file(profile->ssl_ctx, profile->key, SSL_FILETYPE_PEM)) { + err = "PRIVATE KEY FILE ERROR"; + goto fail; + } + + /* verify private key */ + if ( !SSL_CTX_check_private_key(profile->ssl_ctx) ) { + err = "PRIVATE KEY FILE ERROR"; + goto fail; + } + + SSL_CTX_set_cipher_list(profile->ssl_ctx, "HIGH:!DSS:!aNULL@STRENGTH"); + + return 1; + + fail: + ks_log(KS_LOG_ERROR, "SSL ERR: %s\n", err); + + return 0; + +} + +struct tcp_data { + ks_socket_t sock; + ks_sockaddr_t addr; + int ready; + char *ip; + ks_pool_t *pool; + int ssl; + ssl_profile_t client_profile; + ssl_profile_t server_profile; +}; + +void server_callback(ks_socket_t server_sock, ks_socket_t client_sock, ks_sockaddr_t *addr, void *user_data) +{ + struct tcp_data *tcp_data = (struct tcp_data *) user_data; + ks_size_t bytes; + kws_t *kws = NULL; + kws_opcode_t oc; + uint8_t *data; + + + if (tcp_data->ssl) { + tcp_data->server_profile.ssl_method = SSLv23_server_method(); + ks_set_string(tcp_data->server_profile.cert, "/tmp/testwebsock.pem"); + ks_set_string(tcp_data->server_profile.key, "/tmp/testwebsock.pem"); + ks_set_string(tcp_data->server_profile.chain, "/tmp/testwebsock.pem"); + init_ssl(&tcp_data->server_profile); + } + + printf("WS %s SERVER SOCK %d connection from %s:%u\n", tcp_data->ssl ? 
"SSL" : "PLAIN", (int)server_sock, addr->host, addr->port); + + if (kws_init(&kws, client_sock, tcp_data->server_profile.ssl_ctx, NULL, KWS_BLOCK, tcp_data->pool) != KS_STATUS_SUCCESS) { + printf("WS SERVER CREATE FAIL\n"); + goto end; + } + + do { + + bytes = kws_read_frame(kws, &oc, &data); + + if (bytes <= 0) { + printf("WS SERVER BAIL %s\n", strerror(ks_errno())); + break; + } + printf("WS SERVER READ %ld bytes [%s]\n", (long)bytes, (char *)data); + } while(zstr_buf((char *)data) || strcmp((char *)data, __MSG)); + + bytes = kws_write_frame(kws, WSOC_TEXT, (char *)data, strlen((char *)data)); + + printf("WS SERVER WRITE %ld bytes\n", (long)bytes); + + end: + + ks_socket_close(&client_sock); + + kws_destroy(&kws); + + if (tcp_data->ssl) { + SSL_CTX_free(tcp_data->server_profile.ssl_ctx); + } + + printf("WS SERVER COMPLETE\n"); +} + + + +static void *tcp_sock_server(ks_thread_t *thread, void *thread_data) +{ + struct tcp_data *tcp_data = (struct tcp_data *) thread_data; + + tcp_data->ready = 1; + ks_listen_sock(tcp_data->sock, &tcp_data->addr, 0, server_callback, tcp_data); + + printf("WS THREAD DONE\n"); + + return NULL; +} + + +static int test_ws(char *ip, int ssl) +{ + ks_thread_t *thread_p = NULL; + ks_pool_t *pool; + ks_sockaddr_t addr; + int family = AF_INET; + ks_socket_t cl_sock = KS_SOCK_INVALID; + struct tcp_data tcp_data = { 0 }; + int r = 1, sanity = 100; + kws_t *kws = NULL; + + ks_pool_open(&pool); + + tcp_data.pool = pool; + + if (ssl) { + tcp_data.ssl = 1; + tcp_data.client_profile.ssl_method = SSLv23_client_method(); + ks_set_string(tcp_data.client_profile.cert, "/tmp/testwebsock.pem"); + ks_set_string(tcp_data.client_profile.key, "/tmp/testwebsock.pem"); + ks_set_string(tcp_data.client_profile.chain, "/tmp/testwebsock.pem"); + init_ssl(&tcp_data.client_profile); + } + + + if (strchr(ip, ':')) { + family = AF_INET6; + } + + if (ks_addr_set(&tcp_data.addr, ip, tcp_port, family) != KS_STATUS_SUCCESS) { + r = 0; + printf("WS CLIENT Can't set ADDR\n"); + goto end; + } + + if ((tcp_data.sock = socket(family, SOCK_STREAM, IPPROTO_TCP)) == KS_SOCK_INVALID) { + r = 0; + printf("WS CLIENT Can't create sock family %d\n", family); + goto end; + } + + ks_socket_option(tcp_data.sock, SO_REUSEADDR, KS_TRUE); + ks_socket_option(tcp_data.sock, TCP_NODELAY, KS_TRUE); + + tcp_data.ip = ip; + + ks_thread_create(&thread_p, tcp_sock_server, &tcp_data, pool); + + while(!tcp_data.ready && --sanity > 0) { + ks_sleep(10000); + } + + ks_addr_set(&addr, ip, tcp_port, family); + cl_sock = ks_socket_connect(SOCK_STREAM, IPPROTO_TCP, &addr); + + printf("WS %s CLIENT SOCKET %d %s %d\n", ssl ? 
"SSL" : "PLAIN", (int)cl_sock, addr.host, addr.port); + + if (kws_init(&kws, cl_sock, tcp_data.client_profile.ssl_ctx, "/verto:tatooine.freeswitch.org:verto", KWS_BLOCK, pool) != KS_STATUS_SUCCESS) { + printf("WS CLIENT CREATE FAIL\n"); + goto end; + } + + kws_write_frame(kws, WSOC_TEXT, __MSG, strlen(__MSG)); + + kws_opcode_t oc; + uint8_t *data; + ks_ssize_t bytes; + + bytes = kws_read_frame(kws, &oc, &data); + printf("WS CLIENT READ %ld bytes [%s]\n", bytes, (char *)data); + + end: + + kws_destroy(&kws); + + if (ssl) { + SSL_CTX_free(tcp_data.client_profile.ssl_ctx); + } + + if (tcp_data.sock != KS_SOCK_INVALID) { + ks_socket_shutdown(tcp_data.sock, 2); + ks_socket_close(&tcp_data.sock); + } + + if (thread_p) { + ks_thread_join(thread_p); + } + + ks_socket_close(&cl_sock); + + ks_pool_close(&pool); + + return r; +} + + + +int main(void) +{ + int have_v4 = 0, have_v6 = 0; + ks_find_local_ip(v4, sizeof(v4), &mask, AF_INET, NULL); + ks_find_local_ip(v6, sizeof(v6), NULL, AF_INET6, NULL); + ks_init(); + + printf("IPS: v4: [%s] v6: [%s]\n", v4, v6); + + have_v4 = zstr_buf(v4) ? 0 : 1; + have_v6 = zstr_buf(v6) ? 0 : 1; + + plan((have_v4 * 2) + (have_v6 * 2) + 1); + + ok(have_v4 || have_v6); + + if (have_v4 || have_v6) { + ks_gen_cert("/tmp", "testwebsock.pem"); + } + + if (have_v4) { + ok(test_ws(v4, 0)); + ok(test_ws(v4, 1)); + } + + if (have_v6) { + ok(test_ws(v6, 0)); + ok(test_ws(v6, 1)); + } + + unlink("/tmp/testwebsock.pem"); + ks_shutdown(); + + done_testing(); +} diff --git a/libs/libks/test/tmp b/libs/libks/test/tmp new file mode 100644 index 0000000000..e69de29bb2