SHOGUN v3.2.0
CombinedKernel.cpp
00001 /*
00002  * This program is free software; you can redistribute it and/or modify
00003  * it under the terms of the GNU General Public License as published by
00004  * the Free Software Foundation; either version 3 of the License, or
00005  * (at your option) any later version.
00006  *
00007  * Written (W) 1999-2009 Soeren Sonnenburg
00008  * Written (W) 1999-2008 Gunnar Raetsch
00009  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
00010  */
00011 
00012 #include <shogun/lib/common.h>
00013 #include <shogun/io/SGIO.h>
00014 #include <shogun/lib/Signal.h>
00015 #include <shogun/base/Parallel.h>
00016 #include <shogun/lib/DynamicObjectArray.h>
00017 #include <shogun/kernel/Kernel.h>
00018 #include <shogun/kernel/CombinedKernel.h>
00019 #include <shogun/kernel/CustomKernel.h>
00020 #include <shogun/features/CombinedFeatures.h>
00021 #include <string.h>
00022 
00023 #ifndef WIN32
00024 #include <pthread.h>
00025 #endif
00026 
00027 using namespace shogun;
00028 
00029 #ifndef DOXYGEN_SHOULD_SKIP_THIS
00030 struct S_THREAD_PARAM_COMBINED_KERNEL
00031 {
00032     CKernel* kernel;
00033     float64_t* result;
00034     int32_t* vec_idx;
00035     int32_t start;
00036     int32_t end;
00038     float64_t* weights;
00039     int32_t* IDX;
00040     int32_t num_suppvec;
00041 };
00042 #endif // DOXYGEN_SHOULD_SKIP_THIS
00043 
00044 CCombinedKernel::CCombinedKernel(int32_t size, bool asw)
00045 : CKernel(size), append_subkernel_weights(asw)
00046 {
00047     init();
00048 
00049     if (append_subkernel_weights)
00050         SG_INFO("(subkernel weights are appended)\n")
00051 
00052     SG_INFO("Combined kernel created (%p)\n", this)
00053 }
00054 
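/* Usage sketch (illustrative only): assembling a weighted sum of subkernels.
 * CGaussianKernel and the cache sizes/widths below are arbitrary choices for
 * illustration; any CKernel can be appended. */
#include <shogun/kernel/GaussianKernel.h>

static void combined_kernel_construction_sketch()
{
    CCombinedKernel* combined=new CCombinedKernel();

    // two Gaussian subkernels with different widths; the combined kernel
    // evaluates their weighted sum (all weights default to 1)
    combined->append_kernel(new CGaussianKernel(10, 0.5));
    combined->append_kernel(new CGaussianKernel(10, 2.0));

    SG_UNREF(combined);
}
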
00055 CCombinedKernel::~CCombinedKernel()
00056 {
00057     SG_FREE(subkernel_weights_buffer);
00058     subkernel_weights_buffer=NULL;
00059 
00060     cleanup();
00061     SG_UNREF(kernel_array);
00062 
00063     SG_INFO("Combined kernel deleted (%p).\n", this)
00064 }
00065 
00066 bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
00067 {
00068     /* if the specified features are not combined features, but a single other
00069      * feature type, assume that the caller wants to use all kernels on these */
00070     if (l && r && l->get_feature_class()==r->get_feature_class() &&
00071             l->get_feature_type()==r->get_feature_type() &&
00072             l->get_feature_class()!= C_COMBINED)
00073     {
00074         SG_DEBUG("Initialising combined kernel's combined features with the "
00075                 "same instance from parameters\n");
00076         /* construct combined features with each element being the parameter */
00077         CCombinedFeatures* combined_l=new CCombinedFeatures();
00078         CCombinedFeatures* combined_r=new CCombinedFeatures();
00079         for (index_t i=0; i<get_num_subkernels(); ++i)
00080         {
00081             combined_l->append_feature_obj(l);
00082             combined_r->append_feature_obj(r);
00083         }
00084 
00085         /* recursive call with constructed combined kernel */
00086         return init(combined_l, combined_r);
00087     }
00088 
00089     CKernel::init(l,r);
00090     REQUIRE(l->get_feature_class()==C_COMBINED, "%s::init(): LHS features are"
00091             " of class %s but need to be combined features!\n",
00092             get_name(), l->get_name());
00093     REQUIRE(r->get_feature_class()==C_COMBINED, "%s::init(): RHS features are"
00094             " of class %s but need to be combined features!\n",
00095             get_name(), r->get_name());
00096     ASSERT(l->get_feature_type()==F_UNKNOWN)
00097     ASSERT(r->get_feature_type()==F_UNKNOWN)
00098 
00099     CFeatures* lf=NULL;
00100     CFeatures* rf=NULL;
00101     CKernel* k=NULL;
00102 
00103     bool result=true;
00104     index_t f_idx = 0;
00105 
00106     SG_DEBUG("Starting for loop for kernels\n")
00107     for (index_t k_idx=0; k_idx<get_num_kernels() && result; k_idx++)
00108     {
00109         k = get_kernel(k_idx);
00110 
00111         if (!k)
00112             SG_ERROR("Kernel at position %d is NULL\n", k_idx);
00113 
00114         // skip over features - the custom kernel does not need any
00115         if (k->get_kernel_type() != K_CUSTOM)
00116         {
00117             lf = ((CCombinedFeatures*) l)->get_feature_obj(f_idx);
00118             rf = ((CCombinedFeatures*) r)->get_feature_obj(f_idx);
00119             f_idx++;
00120             if (!lf || !rf)
00121             {
00122                 SG_UNREF(lf);
00123                 SG_UNREF(rf);
00124                 SG_UNREF(k);
00125                 SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
00126             }
00127 
00128             SG_DEBUG("Initializing 0x%p - \"%s\"\n", this, k->get_name())
00129             result=k->init(lf,rf);
00130             SG_UNREF(lf);
00131             SG_UNREF(rf);
00132 
00133             if (!result)
00134                 break;
00135         }
00136         else
00137         {
00138             SG_DEBUG("Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n", this, k->get_name())
00139             if (!k->has_features())
00140                 SG_ERROR("No kernel matrix was assigned to this Custom kernel\n")
00141             if (k->get_num_vec_lhs() != num_lhs)
00142                 SG_ERROR("Number of lhs-feature vectors (%d) does not match the number of rows (%d) of the custom kernel\n", num_lhs, k->get_num_vec_lhs())
00143             if (k->get_num_vec_rhs() != num_rhs)
00144                 SG_ERROR("Number of rhs-feature vectors (%d) does not match the number of cols (%d) of the custom kernel\n", num_rhs, k->get_num_vec_rhs())
00145         }
00146 
00147         SG_UNREF(k);
00148     }
00149 
00150     if (!result)
00151     {
00152         SG_INFO("CombinedKernel: Initialising the following kernel failed\n")
00153         if (k)
00154         {
00155             k->list_kernel();
00156             SG_UNREF(k);
00157         }
00158         else
00159             SG_INFO("<NULL>\n")
00160         return false;
00161     }
00162 
00163     if ( (f_idx!=((CCombinedFeatures*) l)->get_num_feature_obj()) ||
00164             (f_idx!=((CCombinedFeatures*) r)->get_num_feature_obj()) )
00165         SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
00166 
00167     init_normalizer();
00168     initialized=true;
00169     return true;
00170 }
00171 
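/* Usage sketch (illustrative only): calling init() with plain dense features,
 * which the branch at the top of init() wraps into CCombinedFeatures so that
 * every subkernel sees the same feature object. CDenseFeatures, CGaussianKernel
 * and the toy matrix are assumptions made for illustration. */
#include <shogun/features/DenseFeatures.h>
#include <shogun/kernel/GaussianKernel.h>

static void combined_kernel_init_sketch()
{
    SGMatrix<float64_t> mat(2, 4); // 2 dimensions, 4 vectors
    for (index_t i=0; i<mat.num_rows*mat.num_cols; ++i)
        mat.matrix[i]=i;

    CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(mat);

    CCombinedKernel* combined=new CCombinedKernel();
    combined->append_kernel(new CGaussianKernel(10, 1.0));
    combined->append_kernel(new CGaussianKernel(10, 4.0));

    // same (non-combined) features on both sides; init() wraps them itself
    combined->init(feats, feats);
    SG_SPRINT("k(0,1)=%f\n", combined->kernel(0, 1));

    SG_UNREF(combined);
}
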
00172 void CCombinedKernel::remove_lhs()
00173 {
00174     delete_optimization();
00175 
00176     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00177     {
00178         CKernel* k = get_kernel(k_idx);
00179         if (k->get_kernel_type() != K_CUSTOM)
00180             k->remove_lhs();
00181 
00182         SG_UNREF(k);
00183     }
00184     CKernel::remove_lhs();
00185 
00186     num_lhs=0;
00187 }
00188 
00189 void CCombinedKernel::remove_rhs()
00190 {
00191     delete_optimization();
00192 
00193     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00194     {
00195         CKernel* k = get_kernel(k_idx);
00196         if (k->get_kernel_type() != K_CUSTOM)
00197             k->remove_rhs();
00198 
00199         SG_UNREF(k);
00200     }
00201     CKernel::remove_rhs();
00202 
00203     num_rhs=0;
00204 }
00205 
00206 void CCombinedKernel::remove_lhs_and_rhs()
00207 {
00208     delete_optimization();
00209 
00210     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00211     {
00212         CKernel* k = get_kernel(k_idx);
00213         if (k->get_kernel_type() != K_CUSTOM)
00214             k->remove_lhs_and_rhs();
00215 
00216         SG_UNREF(k);
00217     }
00218 
00219     CKernel::remove_lhs_and_rhs();
00220 
00221     num_lhs=0;
00222     num_rhs=0;
00223 }
00224 
00225 void CCombinedKernel::cleanup()
00226 {
00227     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00228     {
00229         CKernel* k = get_kernel(k_idx);
00230         k->cleanup();
00231         SG_UNREF(k);
00232     }
00233 
00234     delete_optimization();
00235 
00236     CKernel::cleanup();
00237 
00238     num_lhs=0;
00239     num_rhs=0;
00240 }
00241 
00242 void CCombinedKernel::list_kernels()
00243 {
00244     SG_INFO("BEGIN COMBINED KERNEL LIST - ")
00245     this->list_kernel();
00246 
00247     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00248     {
00249         CKernel* k = get_kernel(k_idx);
00250         k->list_kernel();
00251         SG_UNREF(k);
00252     }
00253     SG_INFO("END COMBINED KERNEL LIST - ")
00254 }
00255 
00256 float64_t CCombinedKernel::compute(int32_t x, int32_t y)
00257 {
00258     float64_t result=0;
00259     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00260     {
00261         CKernel* k = get_kernel(k_idx);
00262         if (k->get_combined_kernel_weight()!=0)
00263             result += k->get_combined_kernel_weight() * k->kernel(x,y);
00264         SG_UNREF(k);
00265     }
00266 
00267     return result;
00268 }
00269 
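/* Note: compute() above evaluates the weighted sum of the subkernels,
 *
 *     k(x,y) = sum_m beta_m * k_m(x,y),
 *
 * where beta_m is the m-th subkernel's combined_kernel_weight; subkernels
 * with zero weight are skipped entirely. */
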
00270 bool CCombinedKernel::init_optimization(
00271     int32_t count, int32_t *IDX, float64_t *weights)
00272 {
00273     SG_DEBUG("initializing CCombinedKernel optimization\n")
00274 
00275     delete_optimization();
00276 
00277     bool have_non_optimizable=false;
00278 
00279     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00280     {
00281         CKernel* k = get_kernel(k_idx);
00282 
00283         bool ret=true;
00284 
00285         if (k && k->has_property(KP_LINADD))
00286             ret=k->init_optimization(count, IDX, weights);
00287         else
00288         {
00289             SG_WARNING("non-optimizable kernel %p in kernel-list\n", k)
00290             have_non_optimizable=true;
00291         }
00292 
00293         if (!ret)
00294         {
00295             have_non_optimizable=true;
00296             SG_WARNING("init_optimization of kernel %p failed\n", k)
00297         }
00298 
00299         SG_UNREF(k);
00300     }
00301 
00302     if (have_non_optimizable)
00303     {
00304         SG_WARNING("some kernels in the kernel-list are not optimized\n")
00305 
00306         sv_idx=SG_MALLOC(int32_t, count);
00307         sv_weight=SG_MALLOC(float64_t, count);
00308         sv_count=count;
00309         for (int32_t i=0; i<count; i++)
00310         {
00311             sv_idx[i]=IDX[i];
00312             sv_weight[i]=weights[i];
00313         }
00314     }
00315     set_is_initialized(true);
00316 
00317     return true;
00318 }
00319 
00320 bool CCombinedKernel::delete_optimization()
00321 {
00322     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00323     {
00324         CKernel* k = get_kernel(k_idx);
00325         if (k->has_property(KP_LINADD))
00326             k->delete_optimization();
00327 
00328         SG_UNREF(k);
00329     }
00330 
00331     SG_FREE(sv_idx);
00332     sv_idx = NULL;
00333 
00334     SG_FREE(sv_weight);
00335     sv_weight = NULL;
00336 
00337     sv_count = 0;
00338     set_is_initialized(false);
00339 
00340     return true;
00341 }
00342 
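/* Usage sketch (illustrative only) of the LINADD path set up by
 * init_optimization()/delete_optimization() above. It assumes `combined` was
 * already initialised on lhs/rhs features; the indices and weights are
 * placeholders for what a trained SVM would provide. */
static void combined_kernel_linadd_sketch(CCombinedKernel* combined)
{
    int32_t sv[2]={0, 3};            // hypothetical support vector indices (lhs)
    float64_t alpha[2]={1.5, -0.5};  // hypothetical signed SV weights

    combined->init_optimization(2, sv, alpha);

    // fast evaluation of sum_j alpha_j * k(sv_j, idx) for rhs vector 1
    float64_t score=combined->compute_optimized(1);
    SG_SPRINT("optimized score: %f\n", score);

    combined->delete_optimization();
}
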
00343 void CCombinedKernel::compute_batch(
00344     int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
00345     int32_t* IDX, float64_t* weights, float64_t factor)
00346 {
00347     ASSERT(num_vec<=get_num_vec_rhs())
00348     ASSERT(num_vec>0)
00349     ASSERT(vec_idx)
00350     ASSERT(result)
00351 
00352     //we have to do the optimization business ourselves, but let's
00353     //make sure we start cleanly
00354     delete_optimization();
00355 
00356     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00357     {
00358         CKernel* k = get_kernel(k_idx);
00359         if (k && k->has_property(KP_BATCHEVALUATION))
00360         {
00361             if (k->get_combined_kernel_weight()!=0)
00362                 k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX, weights, k->get_combined_kernel_weight());
00363         }
00364         else
00365             emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);
00366 
00367         SG_UNREF(k);
00368     }
00369 
00370     //clean up
00371     delete_optimization();
00372 }
00373 
00374 void* CCombinedKernel::compute_optimized_kernel_helper(void* p)
00375 {
00376     S_THREAD_PARAM_COMBINED_KERNEL* params= (S_THREAD_PARAM_COMBINED_KERNEL*) p;
00377     int32_t* vec_idx=params->vec_idx;
00378     CKernel* k=params->kernel;
00379     float64_t* result=params->result;
00380 
00381     for (int32_t i=params->start; i<params->end; i++)
00382         result[i] += k->get_combined_kernel_weight()*k->compute_optimized(vec_idx[i]);
00383 
00384     return NULL;
00385 }
00386 
00387 void* CCombinedKernel::compute_kernel_helper(void* p)
00388 {
00389     S_THREAD_PARAM_COMBINED_KERNEL* params= (S_THREAD_PARAM_COMBINED_KERNEL*) p;
00390     int32_t* vec_idx=params->vec_idx;
00391     CKernel* k=params->kernel;
00392     float64_t* result=params->result;
00393     float64_t* weights=params->weights;
00394     int32_t* IDX=params->IDX;
00395     int32_t num_suppvec=params->num_suppvec;
00396 
00397     for (int32_t i=params->start; i<params->end; i++)
00398     {
00399         float64_t sub_result=0;
00400         for (int32_t j=0; j<num_suppvec; j++)
00401             sub_result += weights[j] * k->kernel(IDX[j], vec_idx[i]);
00402 
00403         result[i] += k->get_combined_kernel_weight()*sub_result;
00404     }
00405 
00406     return NULL;
00407 }
00408 
00409 void CCombinedKernel::emulate_compute_batch(
00410     CKernel* k, int32_t num_vec, int32_t* vec_idx, float64_t* result,
00411     int32_t num_suppvec, int32_t* IDX, float64_t* weights)
00412 {
00413     ASSERT(k)
00414     ASSERT(result)
00415 
00416     if (k->has_property(KP_LINADD))
00417     {
00418         if (k->get_combined_kernel_weight()!=0)
00419         {
00420             k->init_optimization(num_suppvec, IDX, weights);
00421 
00422             int32_t num_threads=parallel->get_num_threads();
00423             ASSERT(num_threads>0)
00424 
00425             if (num_threads < 2)
00426             {
00427                 S_THREAD_PARAM_COMBINED_KERNEL params;
00428                 params.kernel=k;
00429                 params.result=result;
00430                 params.start=0;
00431                 params.end=num_vec;
00432                 params.vec_idx = vec_idx;
00433                 compute_optimized_kernel_helper((void*) &params);
00434             }
00435 #ifdef HAVE_PTHREAD
00436             else
00437             {
00438                 pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
00439                 S_THREAD_PARAM_COMBINED_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_COMBINED_KERNEL, num_threads);
00440                 int32_t step= num_vec/num_threads;
00441 
00442                 int32_t t;
00443 
00444                 for (t=0; t<num_threads-1; t++)
00445                 {
00446                     params[t].kernel = k;
00447                     params[t].result = result;
00448                     params[t].start = t*step;
00449                     params[t].end = (t+1)*step;
00450                     params[t].vec_idx = vec_idx;
00451                     pthread_create(&threads[t], NULL, CCombinedKernel::compute_optimized_kernel_helper, (void*)&params[t]);
00452                 }
00453 
00454                 params[t].kernel = k;
00455                 params[t].result = result;
00456                 params[t].start = t*step;
00457                 params[t].end = num_vec;
00458                 params[t].vec_idx = vec_idx;
00459                 compute_optimized_kernel_helper((void*) &params[t]);
00460 
00461                 for (t=0; t<num_threads-1; t++)
00462                     pthread_join(threads[t], NULL);
00463 
00464                 SG_FREE(params);
00465                 SG_FREE(threads);
00466             }
00467 #endif /* HAVE_PTHREAD */
00468 
00469             k->delete_optimization();
00470         }
00471     }
00472     else
00473     {
00474         ASSERT(IDX!=NULL || num_suppvec==0)
00475         ASSERT(weights!=NULL || num_suppvec==0)
00476 
00477         if (k->get_combined_kernel_weight()!=0)
00478         { // compute the usual way for any non-optimized kernel
00479             int32_t num_threads=parallel->get_num_threads();
00480             ASSERT(num_threads>0)
00481 
00482             if (num_threads < 2)
00483             {
00484                 S_THREAD_PARAM_COMBINED_KERNEL params;
00485                 params.kernel=k;
00486                 params.result=result;
00487                 params.start=0;
00488                 params.end=num_vec;
00489                 params.vec_idx = vec_idx;
00490                 params.IDX = IDX;
00491                 params.weights = weights;
00492                 params.num_suppvec = num_suppvec;
00493                 compute_kernel_helper((void*) &params);
00494             }
00495 #ifdef HAVE_PTHREAD
00496             else
00497             {
00498                 pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
00499                 S_THREAD_PARAM_COMBINED_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_COMBINED_KERNEL, num_threads);
00500                 int32_t step= num_vec/num_threads;
00501 
00502                 int32_t t;
00503 
00504                 for (t=0; t<num_threads-1; t++)
00505                 {
00506                     params[t].kernel = k;
00507                     params[t].result = result;
00508                     params[t].start = t*step;
00509                     params[t].end = (t+1)*step;
00510                     params[t].vec_idx = vec_idx;
00511                     params[t].IDX = IDX;
00512                     params[t].weights = weights;
00513                     params[t].num_suppvec = num_suppvec;
00514                     pthread_create(&threads[t], NULL, CCombinedKernel::compute_kernel_helper, (void*)&params[t]);
00515                 }
00516 
00517                 params[t].kernel = k;
00518                 params[t].result = result;
00519                 params[t].start = t*step;
00520                 params[t].end = num_vec;
00521                 params[t].vec_idx = vec_idx;
00522                 params[t].IDX = IDX;
00523                 params[t].weights = weights;
00524                 params[t].num_suppvec = num_suppvec;
00525                 compute_kernel_helper(&params[t]);
00526 
00527                 for (t=0; t<num_threads-1; t++)
00528                     pthread_join(threads[t], NULL);
00529 
00530                 SG_FREE(params);
00531                 SG_FREE(threads);
00532             }
00533 #endif /* HAVE_PTHREAD */
00534         }
00535     }
00536 }
00537 
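/* Note on the work split used above: with num_vec=10 and num_threads=4,
 * step=10/4=2, so the three spawned threads handle the ranges [0,2), [2,4)
 * and [4,6) while the calling thread computes the remainder [6,10); the
 * integer-division remainder is therefore never lost. */
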
00538 float64_t CCombinedKernel::compute_optimized(int32_t idx)
00539 {
00540     if (!get_is_initialized())
00541     {
00542         SG_ERROR("CCombinedKernel optimization not initialized\n")
00543         return 0;
00544     }
00545 
00546     float64_t result=0;
00547 
00548     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00549     {
00550         CKernel* k = get_kernel(k_idx);
00551         if (k->has_property(KP_LINADD) &&
00552             k->get_is_initialized())
00553         {
00554             if (k->get_combined_kernel_weight()!=0)
00555             {
00556                 result +=
00557                     k->get_combined_kernel_weight()*k->compute_optimized(idx);
00558             }
00559         }
00560         else
00561         {
00562             ASSERT(sv_idx!=NULL || sv_count==0)
00563             ASSERT(sv_weight!=NULL || sv_count==0)
00564 
00565             if (k->get_combined_kernel_weight()!=0)
00566             { // compute the usual way for any non-optimized kernel
00567                 float64_t sub_result=0;
00568                 for (int32_t j=0; j<sv_count; j++)
00569                     sub_result += sv_weight[j] * k->kernel(sv_idx[j], idx);
00570 
00571                 result += k->get_combined_kernel_weight()*sub_result;
00572             }
00573         }
00574 
00575         SG_UNREF(k);
00576     }
00577 
00578     return result;
00579 }
00580 
00581 void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
00582 {
00583     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00584     {
00585         CKernel* k = get_kernel(k_idx);
00586         k->add_to_normal(idx, weight);
00587         SG_UNREF(k);
00588     }
00589     set_is_initialized(true) ;
00590 }
00591 
00592 void CCombinedKernel::clear_normal()
00593 {
00594     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00595     {
00596         CKernel* k = get_kernel(k_idx);
00597         k->clear_normal() ;
00598         SG_UNREF(k);
00599     }
00600     set_is_initialized(true) ;
00601 }
00602 
00603 void CCombinedKernel::compute_by_subkernel(
00604     int32_t idx, float64_t * subkernel_contrib)
00605 {
00606     if (append_subkernel_weights)
00607     {
00608         int32_t i=0 ;
00609         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00610         {
00611             CKernel* k = get_kernel(k_idx);
00612             int32_t num = -1 ;
00613             k->get_subkernel_weights(num);
00614             if (num>1)
00615                 k->compute_by_subkernel(idx, &subkernel_contrib[i]) ;
00616             else
00617                 subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
00618 
00619             SG_UNREF(k);
00620             i += num ;
00621         }
00622     }
00623     else
00624     {
00625         int32_t i=0 ;
00626         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00627         {
00628             CKernel* k = get_kernel(k_idx);
00629             if (k->get_combined_kernel_weight()!=0)
00630                 subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
00631 
00632             SG_UNREF(k);
00633             i++ ;
00634         }
00635     }
00636 }
00637 
00638 const float64_t* CCombinedKernel::get_subkernel_weights(int32_t& num_weights)
00639 {
00640     SG_DEBUG("entering CCombinedKernel::get_subkernel_weights()\n")
00641 
00642     num_weights = get_num_subkernels() ;
00643     SG_FREE(subkernel_weights_buffer);
00644     subkernel_weights_buffer = SG_MALLOC(float64_t, num_weights);
00645 
00646     if (append_subkernel_weights)
00647     {
00648         SG_DEBUG("appending kernel weights\n")
00649 
00650         int32_t i=0 ;
00651         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00652         {
00653             CKernel* k = get_kernel(k_idx);
00654             int32_t num = -1 ;
00655             const float64_t *w = k->get_subkernel_weights(num);
00656             ASSERT(num==k->get_num_subkernels())
00657             for (int32_t j=0; j<num; j++)
00658                 subkernel_weights_buffer[i+j]=w[j] ;
00659 
00660             SG_UNREF(k);
00661             i += num ;
00662         }
00663     }
00664     else
00665     {
00666         SG_DEBUG("not appending kernel weights\n")
00667         int32_t i=0 ;
00668         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00669         {
00670             CKernel* k = get_kernel(k_idx);
00671             subkernel_weights_buffer[i] = k->get_combined_kernel_weight();
00672 
00673             SG_UNREF(k);
00674             i++ ;
00675         }
00676     }
00677 
00678     SG_DEBUG("leaving CCombinedKernel::get_subkernel_weights()\n")
00679     return subkernel_weights_buffer ;
00680 }
00681 
00682 SGVector<float64_t> CCombinedKernel::get_subkernel_weights()
00683 {
00684     int32_t num=0;
00685     const float64_t* w=get_subkernel_weights(num);
00686 
00687     float64_t* weights = SG_MALLOC(float64_t, num);
00688     for (int32_t i=0; i<num; i++)
00689         weights[i] = w[i];
00690 
00691     return SGVector<float64_t>(weights, num);
00692 }
00693 
00694 void CCombinedKernel::set_subkernel_weights(SGVector<float64_t> weights)
00695 {
00696     if (append_subkernel_weights)
00697     {
00698         int32_t i=0 ;
00699         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00700         {
00701             CKernel* k = get_kernel(k_idx);
00702             int32_t num = k->get_num_subkernels() ;
00703             ASSERT(i<weights.vlen)
00704             k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i],num, false));
00705 
00706             SG_UNREF(k);
00707             i += num ;
00708         }
00709     }
00710     else
00711     {
00712         int32_t i=0 ;
00713         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00714         {
00715             CKernel* k = get_kernel(k_idx);
00716             ASSERT(i<weights.vlen)
00717             k->set_combined_kernel_weight(weights.vector[i]);
00718 
00719             SG_UNREF(k);
00720             i++ ;
00721         }
00722     }
00723 }
00724 
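/* Usage sketch (illustrative only) for the weight accessors above, assuming
 * `combined` holds two subkernels and subkernel weights are not appended
 * (each subkernel then contributes exactly one weight). */
#include <shogun/lib/SGVector.h>

static void combined_kernel_weights_sketch(CCombinedKernel* combined)
{
    SGVector<float64_t> w=combined->get_subkernel_weights();

    // e.g. re-balance two subkernels 70/30
    w[0]=0.7;
    w[1]=0.3;
    combined->set_subkernel_weights(w);
}
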
00725 void CCombinedKernel::set_optimization_type(EOptimizationType t)
00726 {
00727     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00728     {
00729         CKernel* k = get_kernel(k_idx);
00730         k->set_optimization_type(t);
00731 
00732         SG_UNREF(k);
00733     }
00734 
00735     CKernel::set_optimization_type(t);
00736 }
00737 
00738 bool CCombinedKernel::precompute_subkernels()
00739 {
00740     if (get_num_kernels()==0)
00741         return false;
00742 
00743     CDynamicObjectArray* new_kernel_array = new CDynamicObjectArray();
00744 
00745     for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00746     {
00747         CKernel* k = get_kernel(k_idx);
00748         new_kernel_array->append_element(new CCustomKernel(k));
00749 
00750         SG_UNREF(k);
00751     }
00752 
00753     SG_UNREF(kernel_array);
00754     kernel_array=new_kernel_array;
00755     SG_REF(kernel_array);
00756 
00757     return true;
00758 }
00759 
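/* Usage sketch (illustrative only): precompute_subkernels() above replaces
 * every subkernel by a CCustomKernel holding its full kernel matrix, trading
 * memory for speed. `combined` is assumed to be initialised on features. */
static void precompute_sketch(CCombinedKernel* combined)
{
    if (combined->precompute_subkernels())
        SG_SPRINT("all %d subkernels are now precomputed\n",
                combined->get_num_subkernels());
}
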
00760 void CCombinedKernel::init()
00761 {
00762     sv_count=0;
00763     sv_idx=NULL;
00764     sv_weight=NULL;
00765     subkernel_weights_buffer=NULL;
00766     initialized=false;
00767 
00768     properties |= KP_LINADD | KP_KERNCOMBINATION | KP_BATCHEVALUATION;
00769     kernel_array=new CDynamicObjectArray();
00770     SG_REF(kernel_array);
00771 
00772     SG_ADD((CSGObject**) &kernel_array, "kernel_array", "Array of kernels.",
00773         MS_AVAILABLE);
00774     m_parameters->add_vector(&sv_idx, &sv_count, "sv_idx",
00775          "Support vector index.");
00776     m_parameters->add_vector(&sv_weight, &sv_count, "sv_weight",
00777          "Support vector weights.");
00778     SG_ADD(&append_subkernel_weights, "append_subkernel_weights",
00779         "If subkernel weights are appended.", MS_AVAILABLE);
00780     SG_ADD(&initialized, "initialized", "Whether kernel is ready to be used.",
00781         MS_NOT_AVAILABLE);
00782 }
00783 
00784 SGMatrix<float64_t> CCombinedKernel::get_parameter_gradient(
00785         const TParameter* param, index_t index)
00786 {
00787     SGMatrix<float64_t> result;
00788 
00789     if (!strcmp(param->m_name, "combined_kernel_weight"))
00790     {
00791         if (append_subkernel_weights)
00792         {
00793             for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00794             {
00795                 CKernel* k=get_kernel(k_idx);
00796                 result=k->get_parameter_gradient(param, index);
00797 
00798                 SG_UNREF(k);
00799 
00800                 if (result.num_cols*result.num_rows>0)
00801                     return result;
00802             }
00803         }
00804         else
00805         {
00806             for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00807             {
00808                 CKernel* k=get_kernel(k_idx);
00809                 result=k->get_kernel_matrix();
00810 
00811                 SG_UNREF(k);
00812 
00813                 return result;
00814             }
00815         }
00816     }
00817     else
00818     {
00819         float64_t coeff;
00820         for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
00821         {
00822             CKernel* k=get_kernel(k_idx);
00823             SGMatrix<float64_t> derivative=
00824                     k->get_parameter_gradient(param, index);
00825 
00826             coeff=1.0;
00827 
00828             if (!append_subkernel_weights)
00829                 coeff=k->get_combined_kernel_weight();
00830 
00831             for (index_t g=0; g<derivative.num_rows; g++)
00832             {
00833                 for (index_t h=0; h<derivative.num_cols; h++)
00834                     derivative(g,h)*=coeff;
00835             }
00836 
00837             if (derivative.num_cols*derivative.num_rows>0)
00838             {
00839                 if (result.num_cols==0 && result.num_rows==0)
00840                     result=derivative;
00841                 else
00842                 {
00843                     for (index_t g=0; g<derivative.num_rows; g++)
00844                     {
00845                         for (index_t h=0; h<derivative.num_cols; h++)
00846                             result(g,h)+=derivative(g,h);
00847                     }
00848                 }
00849             }
00850 
00851             SG_UNREF(k);
00852         }
00853     }
00854 
00855     return result;
00856 }
00857 
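/* Note: for a parameter that lives inside subkernel m, the second branch
 * above returns
 *
 *     dK/dtheta = beta_m * dK_m/dtheta   (when weights are not appended),
 *
 * i.e. each subkernel derivative is scaled by its combined kernel weight and
 * the per-subkernel contributions are summed. */
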
00858 CCombinedKernel* CCombinedKernel::obtain_from_generic(CKernel* kernel)
00859 {
00860     if (kernel->get_kernel_type()!=K_COMBINED)
00861     {
00862         SG_SERROR("CCombinedKernel::obtain_from_generic(): provided kernel is "
00863                 "not of type CCombinedKernel!\n");
00864     }
00865 
00866     /* since an additional reference is returned */
00867     SG_REF(kernel);
00868     return (CCombinedKernel*)kernel;
00869 }
00870 
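/* Usage sketch (illustrative only): the checked downcast above, e.g. for a
 * generic CKernel* retrieved from a trained machine. `some_kernel` is a
 * placeholder name. */
static void downcast_sketch(CKernel* some_kernel)
{
    // errors out unless the kernel really is a CCombinedKernel
    CCombinedKernel* combined=CCombinedKernel::obtain_from_generic(some_kernel);
    SG_SPRINT("combined kernel has %d subkernels\n",
            combined->get_num_subkernels());
    SG_UNREF(combined); // an additional reference was returned
}
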
00871 CList* CCombinedKernel::combine_kernels(CList* kernel_list)
00872 {
00873     CList* return_list = new CList(true);
00874     SG_REF(return_list);
00875 
00876     if (!kernel_list)
00877         return return_list;
00878 
00879     if (kernel_list->get_num_elements()==0)
00880         return return_list;
00881 
00882     int32_t num_combinations = 1;
00883     int32_t list_index = 0;
00884 
00885     /* calculation of total combinations */
00886     CSGObject* list = kernel_list->get_first_element();
00887     while (list)
00888     {
00889         CList* c_list= dynamic_cast<CList* >(list);
00890         if (!c_list)
00891         {
00892             SG_SERROR("CCombinedKernel::combine_kernels() : Failed to cast list of type "
00893                     "%s to type CList\n", list->get_name());
00894         }
00895 
00896         if (c_list->get_num_elements()==0)
00897         {
00898             SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position %d "
00899                     "is empty.\n", list_index);
00900         }
00901 
00902         num_combinations *= c_list->get_num_elements();
00903 
00904         if (kernel_list->get_delete_data())
00905             SG_UNREF(list);
00906 
00907         list = kernel_list->get_next_element();
00908         ++list_index;
00909     }
00910 
00911     /* creation of CCombinedKernels */
00912     CDynamicObjectArray kernel_array(num_combinations);
00913     for (index_t i=0; i<num_combinations; ++i)
00914     {
00915         CCombinedKernel* c_kernel = new CCombinedKernel();
00916         return_list->append_element(c_kernel);
00917         kernel_array.push_back(c_kernel);
00918     }
00919 
00920     /* first pass */
00921     list = kernel_list->get_first_element();
00922     CList* c_list = dynamic_cast<CList* >(list);
00923 
00924     /* kernel index in the list */
00925     index_t kernel_index = 0;
00926 
00927     /* here we replicate the first sub-list in the pattern
00928      * a,b,c,d, a,b,c,d, ..., a,b,c,d --- for a total of num_combinations elements
00929      */
00930     EKernelType prev_kernel_type = K_UNKNOWN;
00931     bool first_kernel = true;
00932     for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
00933     {
00934         CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
00935 
00936         if (first_kernel)
00937              first_kernel = false;
00938         else if (c_kernel->get_kernel_type()!=prev_kernel_type)
00939         {
00940             SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
00941                     "0 contains different types of kernels\n");
00942         }
00943 
00944         prev_kernel_type = c_kernel->get_kernel_type();
00945 
00946         for (index_t index=kernel_index; index<num_combinations; index+=c_list->get_num_elements())
00947         {
00948             CCombinedKernel* comb_kernel =
00949                     dynamic_cast<CCombinedKernel* >(kernel_array.get_element(index));
00950             comb_kernel->append_kernel(c_kernel);
00951             SG_UNREF(comb_kernel);
00952         }
00953         ++kernel_index;
00954         if (c_list->get_delete_data())
00955             SG_UNREF(kernel);
00956     }
00957 
00958     if (kernel_list->get_delete_data())
00959         SG_UNREF(list);
00960 
00961     /* how often each kernel of the sub-list must appear */
00962     int32_t freq = c_list->get_num_elements();
00963 
00964     /* in this loop each kernel of the current sub-list is replicated freq
00965      * consecutive times, until every one of the num_combinations
00966      * CombinedKernels has been assigned a sub-kernel from this list */
00967     list = kernel_list->get_next_element();
00968     list_index = 1;
00969     while (list)
00970     {
00971         c_list = dynamic_cast<CList* >(list);
00972 
00973         /* index of kernel in the list */
00974         kernel_index = 0;
00975         first_kernel = true;
00976         for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
00977         {
00978             CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
00979 
00980             if (first_kernel)
00981                 first_kernel = false;
00982             else if (c_kernel->get_kernel_type()!=prev_kernel_type)
00983             {
00984                 SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
00985                         "%d contains different types of kernels\n", list_index);
00986             }
00987 
00988             prev_kernel_type = c_kernel->get_kernel_type();
00989 
00990             /* advance the base index so that filling continues until the end of the list of combined kernels is reached */
00991             for (index_t base=kernel_index*freq; base<num_combinations; base+=c_list->get_num_elements()*freq)
00992             {
00993                 /* insert the current kernel freq consecutive times */
00994                 for (index_t index=0; index<freq; ++index)
00995                 {
00996                     CCombinedKernel* comb_kernel =
00997                             dynamic_cast<CCombinedKernel* >(kernel_array.get_element(base+index));
00998                     comb_kernel->append_kernel(c_kernel);
00999                     SG_UNREF(comb_kernel);
01000                 }
01001             }
01002             ++kernel_index;
01003 
01004             if (c_list->get_delete_data())
01005                 SG_UNREF(kernel);
01006         }
01007 
01008         freq *= c_list->get_num_elements();
01009         if (kernel_list->get_delete_data())
01010             SG_UNREF(list);
01011         list = kernel_list->get_next_element();
01012         ++list_index;
01013     }
01014 
01015     return return_list;
01016 }
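
/* Usage sketch (illustrative only) for combine_kernels() above: two sub-lists
 * with 2 and 3 Gaussian kernels yield 2*3=6 CCombinedKernels, each holding one
 * kernel from every sub-list. CGaussianKernel and the widths are arbitrary. */
#include <shogun/lib/List.h>
#include <shogun/kernel/GaussianKernel.h>

static void combine_kernels_sketch()
{
    CList* widths_a=new CList(true);
    widths_a->append_element(new CGaussianKernel(10, 0.5));
    widths_a->append_element(new CGaussianKernel(10, 1.0));

    CList* widths_b=new CList(true);
    widths_b->append_element(new CGaussianKernel(10, 2.0));
    widths_b->append_element(new CGaussianKernel(10, 4.0));
    widths_b->append_element(new CGaussianKernel(10, 8.0));

    CList* all_lists=new CList(true);
    all_lists->append_element(widths_a);
    all_lists->append_element(widths_b);

    CList* combinations=CCombinedKernel::combine_kernels(all_lists);
    SG_SPRINT("%d kernel combinations created\n",
            combinations->get_num_elements());

    SG_UNREF(combinations);
    SG_UNREF(all_lists);
}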