SHOGUN  v3.2.0
SparseFeatures.cpp
00001 #include <shogun/lib/memory.h>
00002 #include <shogun/features/SparseFeatures.h>
00003 #include <shogun/preprocessor/SparsePreprocessor.h>
00004 #include <shogun/mathematics/Math.h>
00005 #include <shogun/lib/DataType.h>
00006 #include <shogun/labels/RegressionLabels.h>
00007 #include <shogun/io/SGIO.h>
00008 
00009 #include <string.h>
00010 #include <stdlib.h>
00011 
00012 namespace shogun
00013 {
00014 
00015 template<class ST> CSparseFeatures<ST>::CSparseFeatures(int32_t size)
00016 : CDotFeatures(size), feature_cache(NULL)
00017 {
00018     init();
00019 }
00020 
00021 template<class ST> CSparseFeatures<ST>::CSparseFeatures(SGSparseMatrix<ST> sparse)
00022 : CDotFeatures(0), feature_cache(NULL)
00023 {
00024     init();
00025 
00026     set_sparse_feature_matrix(sparse);
00027 }
00028 
00029 template<class ST> CSparseFeatures<ST>::CSparseFeatures(SGMatrix<ST> dense)
00030 : CDotFeatures(0), feature_cache(NULL)
00031 {
00032     init();
00033 
00034     set_full_feature_matrix(dense);
00035 }
00036 
00037 template<class ST> CSparseFeatures<ST>::CSparseFeatures(const CSparseFeatures & orig)
00038 : CDotFeatures(orig), sparse_feature_matrix(orig.sparse_feature_matrix),
00039     feature_cache(orig.feature_cache)
00040 {
00041     init();
00042 
00043     m_subset_stack=orig.m_subset_stack;
00044     SG_REF(m_subset_stack);
00045 }
00046 template<class ST> CSparseFeatures<ST>::CSparseFeatures(CFile* loader)
00047 : CDotFeatures(), feature_cache(NULL)
00048 {
00049     init();
00050 
00051     load(loader);
00052 }
00053 
00054 template<class ST> CSparseFeatures<ST>::~CSparseFeatures()
00055 {
00056     SG_UNREF(feature_cache);
00057 }
00058 
00059 template<class ST> CFeatures* CSparseFeatures<ST>::duplicate() const
00060 {
00061     return new CSparseFeatures<ST>(*this);
00062 }
00063 
00064 template<class ST> ST CSparseFeatures<ST>::get_feature(int32_t num, int32_t index)
00065 {
00066     REQUIRE(index>=0 && index<get_num_features(),
00067         "get_feature(num=%d,index=%d): index exceeds [0;%d]\n",
00068         num, index, get_num_features()-1);
00069 
00070     SGSparseVector<ST> sv=get_sparse_feature_vector(num);
00071     ST ret = sv.get_feature(index);
00072 
00073     free_sparse_feature_vector(num);
00074     return ret;
00075 }
00076 
00077 template<class ST> SGVector<ST> CSparseFeatures<ST>::get_full_feature_vector(int32_t num)
00078 {
00079     SGSparseVector<ST> sv=get_sparse_feature_vector(num);
00080     SGVector<ST> dense = sv.get_dense(get_num_features());
00081     free_sparse_feature_vector(num);
00082     return dense;
00083 }
00084 
00085 template<class ST> int32_t CSparseFeatures<ST>::get_nnz_features_for_vector(int32_t num)
00086 {
00087     SGSparseVector<ST> sv = get_sparse_feature_vector(num);
00088     int32_t len=sv.num_feat_entries;
00089     free_sparse_feature_vector(num);
00090     return len;
00091 }
00092 
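// Returns the num-th sparse feature vector (num is translated through any
// active subset). If an in-memory sparse matrix is set it is indexed directly;
// otherwise the vector is served from / stored into the feature cache and
// produced by compute_sparse_feature_vector(). Note that the per-vector
// preprocessor application in the else-branch is currently commented out.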
00093 template<class ST> SGSparseVector<ST> CSparseFeatures<ST>::get_sparse_feature_vector(int32_t num)
00094 {
00095     REQUIRE(num>=0 && num<get_num_vectors(),
00096         "get_sparse_feature_vector(num=%d): num exceeds [0;%d]\n",
00097         num, get_num_vectors()-1);
00098     index_t real_num=m_subset_stack->subset_idx_conversion(num);
00099 
00100     if (sparse_feature_matrix.sparse_matrix)
00101     {
00102         return sparse_feature_matrix[real_num];
00103     }
00104     else
00105     {
00106         SGSparseVector<ST> result;
00107         if (feature_cache)
00108         {
00109             result.features=feature_cache->lock_entry(num);
00110 
00111             if (result.features)
00112                 return result;
00113             else
00114             {
00115                 result.features=feature_cache->set_entry(num);
00116             }
00117         }
00118 
00119         //if (!result.features)
00120         //  result.do_free=true;
00121 
00122         result.features=compute_sparse_feature_vector(num,
00123             result.num_feat_entries, result.features);
00124 
00125 
00126         if (get_num_preprocessors())
00127         {
00128             int32_t tmp_len=result.num_feat_entries;
00129             SGSparseVectorEntry<ST>* tmp_feat_before=result.features;
00130             SGSparseVectorEntry<ST>* tmp_feat_after = NULL;
00131 
00132             for (int32_t i=0; i<get_num_preprocessors(); i++)
00133             {
00134                 //tmp_feat_after=((CSparsePreprocessor<ST>*) get_preproc(i))->apply_to_feature_vector(tmp_feat_before, tmp_len);
00135 
00136                 if (i!=0)   // delete feature vector, except for the first one, i.e., feat
00137                     SG_FREE(tmp_feat_before);
00138                 tmp_feat_before=tmp_feat_after;
00139             }
00140 
00141             if (tmp_feat_after)
00142             {
00143                 memcpy(result.features, tmp_feat_after,
00144                         sizeof(SGSparseVectorEntry<ST>)*tmp_len);
00145 
00146                 SG_FREE(tmp_feat_after);
00147                 result.num_feat_entries=tmp_len;
00148             }
00149             SG_DEBUG("len: %d len2: %d\n", result.num_feat_entries, get_num_features())
00150         }
00151         return result;
00152     }
00153 }
00154 
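// Delegates to SGSparseVector::dense_dot() to compute a scaled dot product
// (alpha * x_num . vec + b) against a dense vector of length dim.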
00155 template<class ST> ST CSparseFeatures<ST>::dense_dot(ST alpha, int32_t num, ST* vec, int32_t dim, ST b)
00156 {
00157     SGSparseVector<ST> sv=get_sparse_feature_vector(num);
00158     ST result = sv.dense_dot(alpha,vec,dim,b);
00159     free_sparse_feature_vector(num);
00160     return result;
00161 }
00162 
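// Adds alpha * x_num (or alpha times the absolute entries of x_num when
// abs_val is set) onto the dense accumulator vec; dim must be at least
// get_num_features().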
00163 template<class ST> void CSparseFeatures<ST>::add_to_dense_vec(float64_t alpha, int32_t num, float64_t* vec, int32_t dim, bool abs_val)
00164 {
00165     REQUIRE(vec, "add_to_dense_vec(num=%d,dim=%d): vec must not be NULL\n",
00166         num, dim);
00167     REQUIRE(dim>=get_num_features(),
00168         "add_to_dense_vec(num=%d,dim=%d): dim should be at least the number of features %d\n",
00169         num, dim, get_num_features());
00170 
00171     SGSparseVector<ST> sv=get_sparse_feature_vector(num);
00172 
00173     if (sv.features)
00174     {
00175         if (abs_val)
00176         {
00177             for (int32_t i=0; i<sv.num_feat_entries; i++)
00178             {
00179                 vec[sv.features[i].feat_index]+=alpha
00180                     *CMath::abs(sv.features[i].entry);
00181             }
00182         }
00183         else
00184         {
00185             for (int32_t i=0; i<sv.num_feat_entries; i++)
00186             {
00187                 vec[sv.features[i].feat_index]+=alpha
00188                         *sv.features[i].entry;
00189             }
00190         }
00191     }
00192 
00193     free_sparse_feature_vector(num);
00194 }
00195 
00196 template<>
00197 void CSparseFeatures<complex128_t>::add_to_dense_vec(float64_t alpha,
00198     int32_t num, float64_t* vec, int32_t dim, bool abs_val)
00199 {
00200     SG_NOTIMPLEMENTED;
00201 }
00202 
00203 template<class ST> void CSparseFeatures<ST>::free_sparse_feature_vector(int32_t num)
00204 {
00205     if (feature_cache)
00206         feature_cache->unlock_entry(m_subset_stack->subset_idx_conversion(num));
00207 
00208     //vec.free_vector();
00209 }
00210 
00211 template<class ST> SGSparseMatrix<ST> CSparseFeatures<ST>::get_sparse_feature_matrix()
00212 {
00213     if (m_subset_stack->has_subsets())
00214         SG_ERROR("Not allowed with subset\n");
00215 
00216     return sparse_feature_matrix;
00217 }
00218 
00219 template<class ST> CSparseFeatures<ST>* CSparseFeatures<ST>::get_transposed()
00220 {
00221     if (m_subset_stack->has_subsets())
00222         SG_ERROR("Not allowed with subset\n");
00223 
00224     return new CSparseFeatures<ST>(sparse_feature_matrix.get_transposed());
00225 }
00226 
00227 template<class ST> void CSparseFeatures<ST>::set_sparse_feature_matrix(SGSparseMatrix<ST> sm)
00228 {
00229     if (m_subset_stack->has_subsets())
00230         SG_ERROR("Not allowed with subset\n");
00231 
00232     sparse_feature_matrix=sm;
00233 
00234     // TODO: check should be implemented in sparse matrix class
00235     for (int32_t j=0; j<get_num_vectors(); j++) {
00236         SGSparseVector<ST> sv=get_sparse_feature_vector(j);
00237         REQUIRE(get_num_features() >= sv.get_num_dimensions(),
00238             "sparse_matrix[%d] check failed (matrix features %d >= vector dimension %d)\n",
00239             j, get_num_features(), sv.get_num_dimensions());
00240     }
00241 }
00242 
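// Densifies the (possibly subsetted) features into a num_features x num_vectors
// matrix, one column per example; positions absent from a sparse vector stay zero.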
00243 template<class ST> SGMatrix<ST> CSparseFeatures<ST>::get_full_feature_matrix()
00244 {
00245     SGMatrix<ST> full(get_num_features(), get_num_vectors());
00246     full.zero();
00247 
00248     SG_INFO("converting sparse features to full feature matrix of %d x %d"
00249             " entries\n", sparse_feature_matrix.num_vectors, get_num_features())
00250 
00251     for (int32_t v=0; v<full.num_cols; v++)
00252     {
00253         int32_t idx=m_subset_stack->subset_idx_conversion(v);
00254         SGSparseVector<ST> current=sparse_feature_matrix[idx];
00255 
00256         for (int32_t f=0; f<current.num_feat_entries; f++)
00257         {
00258             int64_t offs=((int64_t) v)*get_num_features()
00259                     +current.features[f].feat_index;
00260 
00261             full.matrix[offs]=current.features[f].entry;
00262         }
00263     }
00264 
00265     return full;
00266 }
00267 
00268 template<class ST> void CSparseFeatures<ST>::free_sparse_features()
00269 {
00270     free_sparse_feature_matrix();
00271     SG_UNREF(feature_cache);
00272 }
00273 
00274 template<class ST> void CSparseFeatures<ST>::free_sparse_feature_matrix()
00275 {
00276     sparse_feature_matrix=SGSparseMatrix<ST>();
00277 }
00278 
00279 template<class ST> void CSparseFeatures<ST>::set_full_feature_matrix(SGMatrix<ST> full)
00280 {
00281     remove_all_subsets();
00282     free_sparse_feature_matrix();
00283     sparse_feature_matrix.from_dense(full);
00284 }
00285 
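// Runs the attached sparse preprocessors in place on the feature matrix
// (skipping ones already applied unless force_preprocessing is set); returns
// false when no in-memory matrix or no preprocessors are available, or when a
// preprocessor fails.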
00286 template<class ST> bool CSparseFeatures<ST>::apply_preprocessor(bool force_preprocessing)
00287 {
00288     SG_INFO("force: %d\n", force_preprocessing)
00289 
00290     if ( sparse_feature_matrix.sparse_matrix && get_num_preprocessors() )
00291     {
00292         for (int32_t i=0; i<get_num_preprocessors(); i++)
00293         {
00294             if ( (!is_preprocessed(i) || force_preprocessing) )
00295             {
00296                 set_preprocessed(i);
00297                 SG_INFO("preprocessing using preproc %s\n", get_preprocessor(i)->get_name())
00298                 if (((CSparsePreprocessor<ST>*) get_preprocessor(i))->apply_to_sparse_feature_matrix(this) == NULL)
00299                     return false;
00300             }
00302         }
00303         return true;
00304     }
00305     else
00306     {
00307         SG_WARNING("no sparse feature matrix available or features already preprocessed - skipping.\n")
00308         return false;
00309     }
00310 }
00311 
00312 template<class ST> void CSparseFeatures<ST>::obtain_from_simple(CDenseFeatures<ST>* sf)
00313 {
00314     SGMatrix<ST> fm=sf->get_feature_matrix();
00315     ASSERT(fm.matrix && fm.num_cols>0 && fm.num_rows>0)
00316     set_full_feature_matrix(fm);
00317 }
00318 
00319 template<> void CSparseFeatures<complex128_t>::obtain_from_simple(CDenseFeatures<complex128_t>* sf)
00320 {
00321     SG_NOTIMPLEMENTED;
00322 }
00323 
00324 template<class ST> int32_t  CSparseFeatures<ST>::get_num_vectors() const
00325 {
00326     return m_subset_stack->has_subsets() ? m_subset_stack->get_size() : sparse_feature_matrix.num_vectors;
00327 }
00328 
00329 template<class ST> int32_t  CSparseFeatures<ST>::get_num_features() const
00330 {
00331     return sparse_feature_matrix.num_features;
00332 }
00333 
00334 template<class ST> int32_t CSparseFeatures<ST>::set_num_features(int32_t num)
00335 {
00336     int32_t n=get_num_features();
00337     ASSERT(n<=num)
00338     sparse_feature_matrix.num_features=num;
00339     return sparse_feature_matrix.num_features;
00340 }
00341 
00342 template<class ST> EFeatureClass CSparseFeatures<ST>::get_feature_class() const
00343 {
00344     return C_SPARSE;
00345 }
00346 
00347 template<class ST> void CSparseFeatures<ST>::free_feature_vector(int32_t num)
00348 {
00349     if (feature_cache)
00350         feature_cache->unlock_entry(m_subset_stack->subset_idx_conversion(num));
00351 
00352     //vec.free_vector();
00353 }
00354 
00355 template<class ST> int64_t CSparseFeatures<ST>::get_num_nonzero_entries()
00356 {
00357     int64_t num=0;
00358     index_t num_vec=get_num_vectors();
00359     for (int32_t i=0; i<num_vec; i++)
00360         num+=sparse_feature_matrix[m_subset_stack->subset_idx_conversion(i)].num_feat_entries;
00361 
00362     return num;
00363 }
00364 
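// Fills sq[i] with the squared Euclidean norm of feature vector i and returns sq.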
00365 template<class ST> float64_t* CSparseFeatures<ST>::compute_squared(float64_t* sq)
00366 {
00367     ASSERT(sq)
00368 
00369     index_t num_vec=get_num_vectors();
00370     for (int32_t i=0; i<num_vec; i++)
00371     {
00372         sq[i]=0;
00373         SGSparseVector<ST> vec=get_sparse_feature_vector(i);
00374 
00375         for (int32_t j=0; j<vec.num_feat_entries; j++)
00376             sq[i]+=vec.features[j].entry*vec.features[j].entry;
00377 
00378         free_feature_vector(i);
00379     }
00380 
00381     return sq;
00382 }
00383 
00384 template<> float64_t* CSparseFeatures<complex128_t>::compute_squared(float64_t* sq)
00385 {
00386     SG_NOTIMPLEMENTED;
00387     return sq;
00388 }
00389 
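// Computes ||x_a - x_b||^2 = sq_lhs[idx_a] + sq_rhs[idx_b] - 2 * (x_a . x_b)
// by merging the two index lists (assumed sorted ascending by feat_index).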
00390 template<class ST> float64_t CSparseFeatures<ST>::compute_squared_norm(
00391         CSparseFeatures<float64_t>* lhs, float64_t* sq_lhs, int32_t idx_a,
00392         CSparseFeatures<float64_t>* rhs, float64_t* sq_rhs, int32_t idx_b)
00393 {
00394     int32_t i,j;
00395     ASSERT(lhs)
00396     ASSERT(rhs)
00397 
00398     SGSparseVector<float64_t> avec=lhs->get_sparse_feature_vector(idx_a);
00399     SGSparseVector<float64_t> bvec=rhs->get_sparse_feature_vector(idx_b);
00400     ASSERT(avec.features)
00401     ASSERT(bvec.features)
00402 
00403     float64_t result=sq_lhs[idx_a]+sq_rhs[idx_b];
00404 
00405     if (avec.num_feat_entries<=bvec.num_feat_entries)
00406     {
00407         j=0;
00408         for (i=0; i<avec.num_feat_entries; i++)
00409         {
00410             int32_t a_feat_idx=avec.features[i].feat_index;
00411 
00412             while ((j<bvec.num_feat_entries)
00413                     &&(bvec.features[j].feat_index<a_feat_idx))
00414                 j++;
00415 
00416             if ((j<bvec.num_feat_entries)
00417                     &&(bvec.features[j].feat_index==a_feat_idx))
00418             {
00419                 result-=2*(avec.features[i].entry*bvec.features[j].entry);
00420                 j++;
00421             }
00422         }
00423     }
00424     else
00425     {
00426         j=0;
00427         for (i=0; i<bvec.num_feat_entries; i++)
00428         {
00429             int32_t b_feat_idx=bvec.features[i].feat_index;
00430 
00431             while ((j<avec.num_feat_entries)
00432                     &&(avec.features[j].feat_index<b_feat_idx))
00433                 j++;
00434 
00435             if ((j<avec.num_feat_entries)
00436                     &&(avec.features[j].feat_index==b_feat_idx))
00437             {
00438                 result-=2*(bvec.features[i].entry*avec.features[j].entry);
00439                 j++;
00440             }
00441         }
00442     }
00443 
00444     ((CSparseFeatures<float64_t>*) lhs)->free_feature_vector(idx_a);
00445     ((CSparseFeatures<float64_t>*) rhs)->free_feature_vector(idx_b);
00446 
00447     return CMath::abs(result);
00448 }
00449 
00450 template<class ST> int32_t CSparseFeatures<ST>::get_dim_feature_space() const
00451 {
00452     return get_num_features();
00453 }
00454 
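// Sparse-sparse dot product between vector vec_idx1 of this object and vector
// vec_idx2 of another CSparseFeatures instance of the same feature type/class.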
00455 template<class ST> float64_t CSparseFeatures<ST>::dot(int32_t vec_idx1,
00456         CDotFeatures* df, int32_t vec_idx2)
00457 {
00458     ASSERT(df)
00459     ASSERT(df->get_feature_type() == get_feature_type())
00460     ASSERT(df->get_feature_class() == get_feature_class())
00461     CSparseFeatures<ST>* sf = (CSparseFeatures<ST>*) df;
00462 
00463     SGSparseVector<ST> avec=get_sparse_feature_vector(vec_idx1);
00464     SGSparseVector<ST> bvec=sf->get_sparse_feature_vector(vec_idx2);
00465 
00466     float64_t result = SGSparseVector<ST>::sparse_dot(avec, bvec);
00467     free_sparse_feature_vector(vec_idx1);
00468     sf->free_sparse_feature_vector(vec_idx2);
00469 
00470     return result;
00471 }
00472 
00473 template<> float64_t CSparseFeatures<complex128_t>::dot(int32_t vec_idx1,
00474         CDotFeatures* df, int32_t vec_idx2)
00475 {
00476     SG_NOTIMPLEMENTED;
00477     return 0.0;
00478 }
00479 
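// Sparse-dense dot product between feature vector vec_idx1 and the dense array
// vec2 of length vec2_len (vec2_len must be at least get_num_features()).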
00480 template<class ST> float64_t CSparseFeatures<ST>::dense_dot(int32_t vec_idx1, float64_t* vec2, int32_t vec2_len)
00481 {
00482     REQUIRE(vec2, "dense_dot(vec_idx1=%d,vec2_len=%d): vec2 must not be NULL\n",
00483         vec_idx1, vec2_len);
00484     REQUIRE(vec2_len>=get_num_features(),
00485         "dense_dot(vec_idx1=%d,vec2_len=%d): vec2_len should be at least the number of features %d\n",
00486         vec_idx1, vec2_len, get_num_features());
00487 
00488     float64_t result=0;
00489     SGSparseVector<ST> sv=get_sparse_feature_vector(vec_idx1);
00490 
00491     if (sv.features)
00492     {
00493         REQUIRE(get_num_features() >= sv.get_num_dimensions(),
00494             "sparse_matrix[%d] check failed (matrix features %d >= vector dimension %d)\n",
00495             vec_idx1, get_num_features(), sv.get_num_dimensions());
00496 
00497         REQUIRE(vec2_len >= sv.get_num_dimensions(),
00498             "sparse_matrix[%d] check failed (dense vector dimension %d >= vector dimension %d)\n",
00499             vec_idx1, vec2_len, sv.get_num_dimensions());
00500 
00501         for (int32_t i=0; i<sv.num_feat_entries; i++)
00502             result+=vec2[sv.features[i].feat_index]*sv.features[i].entry;
00503     }
00504 
00505     free_sparse_feature_vector(vec_idx1);
00506 
00507     return result;
00508 }
00509 
00510 template<> float64_t CSparseFeatures<complex128_t>::dense_dot(int32_t vec_idx1,
00511     float64_t* vec2, int32_t vec2_len)
00512 {
00513     SG_NOTIMPLEMENTED;
00514     return 0.0;
00515 }
00516 
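// Lightweight iteration API over the nonzero entries of a single vector:
// get_feature_iterator() allocates an opaque sparse_feature_iterator,
// get_next_feature() yields one (index, value) pair per call until exhausted,
// and free_feature_iterator() releases it.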
00517 template<class ST> void* CSparseFeatures<ST>::get_feature_iterator(int32_t vector_index)
00518 {
00519     if (vector_index>=get_num_vectors())
00520     {
00521         SG_ERROR("Index out of bounds (number of vectors %d, you "
00522                 "requested %d)\n", get_num_vectors(), vector_index);
00523     }
00524 
00525     if (!sparse_feature_matrix.sparse_matrix)
00526         SG_ERROR("Requires an in-memory feature matrix\n")
00527 
00528     sparse_feature_iterator* it=new sparse_feature_iterator();
00529     it->sv=get_sparse_feature_vector(vector_index);
00530     it->index=0;
00531     it->vector_index=vector_index;
00532 
00533     return it;
00534 }
00535 
00536 template<class ST> bool CSparseFeatures<ST>::get_next_feature(int32_t& index, float64_t& value, void* iterator)
00537 {
00538     sparse_feature_iterator* it=(sparse_feature_iterator*) iterator;
00539     if (!it || it->index>=it->sv.num_feat_entries)
00540         return false;
00541 
00542     int32_t i=it->index++;
00543 
00544     index=it->sv.features[i].feat_index;
00545     value=(float64_t) it->sv.features[i].entry;
00546 
00547     return true;
00548 }
00549 
00550 template<> bool CSparseFeatures<complex128_t>::get_next_feature(int32_t& index,
00551     float64_t& value, void* iterator)
00552 {
00553     SG_NOTIMPLEMENTED;
00554     return false;
00555 }
00556 
00557 template<class ST> void CSparseFeatures<ST>::free_feature_iterator(void* iterator)
00558 {
00559     if (!iterator)
00560         return;
00561 
00562     delete ((sparse_feature_iterator*) iterator);
00563 }
00564 
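// Builds a new CSparseFeatures object from the vectors selected by indices;
// each index is mapped through any active subset before the vector is fetched.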
00565 template<class ST> CFeatures* CSparseFeatures<ST>::copy_subset(SGVector<index_t> indices)
00566 {
00567     SGSparseMatrix<ST> matrix_copy=SGSparseMatrix<ST>(get_dim_feature_space(),
00568             indices.vlen);
00569 
00570     for (index_t i=0; i<indices.vlen; ++i)
00571     {
00572         /* index to copy */
00573         index_t index=indices.vector[i];
00574         index_t real_index=m_subset_stack->subset_idx_conversion(index);
00575 
00576         /* copy sparse vector */
00577         SGSparseVector<ST> current=get_sparse_feature_vector(real_index);
00578         matrix_copy.sparse_matrix[i]=current;
00579 
00580         free_sparse_feature_vector(index);
00581     }
00582 
00583     CFeatures* result=new CSparseFeatures<ST>(matrix_copy);
00584     return result;
00585 }
00586 
00587 template<class ST> SGSparseVectorEntry<ST>* CSparseFeatures<ST>::compute_sparse_feature_vector(int32_t num,
00588     int32_t& len, SGSparseVectorEntry<ST>* target)
00589 {
00590     SG_NOTIMPLEMENTED
00591 
00592     len=0;
00593     return NULL;
00594 }
00595 
00596 template<class ST> void CSparseFeatures<ST>::sort_features()
00597 {
00598     sparse_feature_matrix.sort_features();
00599 }
00600 
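// Sets the generic element type and registers the sparse matrix and feature
// count with the parameter framework so they are serialized with the object.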
00601 template<class ST> void CSparseFeatures<ST>::init()
00602 {
00603     set_generic<ST>();
00604 
00605     m_parameters->add_vector(&sparse_feature_matrix.sparse_matrix, &sparse_feature_matrix.num_vectors,
00606             "sparse_feature_matrix",
00607             "Array of sparse vectors.");
00608     m_parameters->add(&sparse_feature_matrix.num_features, "sparse_feature_matrix.num_features",
00609             "Total number of features.");
00610 }
00611 
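// Maps each element type ST to its EFeatureType constant via explicit
// specializations of get_feature_type().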
00612 #define GET_FEATURE_TYPE(sg_type, f_type)                                   \
00613 template<> EFeatureType CSparseFeatures<sg_type>::get_feature_type() const  \
00614 {                                                                           \
00615     return f_type;                                                          \
00616 }
00617 GET_FEATURE_TYPE(bool, F_BOOL)
00618 GET_FEATURE_TYPE(char, F_CHAR)
00619 GET_FEATURE_TYPE(uint8_t, F_BYTE)
00620 GET_FEATURE_TYPE(int8_t, F_BYTE)
00621 GET_FEATURE_TYPE(int16_t, F_SHORT)
00622 GET_FEATURE_TYPE(uint16_t, F_WORD)
00623 GET_FEATURE_TYPE(int32_t, F_INT)
00624 GET_FEATURE_TYPE(uint32_t, F_UINT)
00625 GET_FEATURE_TYPE(int64_t, F_LONG)
00626 GET_FEATURE_TYPE(uint64_t, F_ULONG)
00627 GET_FEATURE_TYPE(float32_t, F_SHORTREAL)
00628 GET_FEATURE_TYPE(float64_t, F_DREAL)
00629 GET_FEATURE_TYPE(floatmax_t, F_LONGREAL)
00630 GET_FEATURE_TYPE(complex128_t, F_ANY)
00631 #undef GET_FEATURE_TYPE
00632 
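// File I/O: load()/save() delegate to SGSparseMatrix, and the *_with_labels()
// variants use CLibSVMFile to read/write the features together with a label vector.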
00633 template<class ST> void CSparseFeatures<ST>::load(CFile* loader)
00634 {
00635     remove_all_subsets();
00636     ASSERT(loader)
00637     free_sparse_feature_matrix();
00638     sparse_feature_matrix.load(loader);
00639 }
00640 
00641 template<class ST> SGVector<float64_t> CSparseFeatures<ST>::load_with_labels(CLibSVMFile* loader)
00642 {
00643     remove_all_subsets();
00644     ASSERT(loader)
00645     free_sparse_feature_matrix();
00646     return sparse_feature_matrix.load_with_labels(loader);
00647 }
00648 
00649 template<class ST> void CSparseFeatures<ST>::save(CFile* writer)
00650 {
00651     if (m_subset_stack->has_subsets())
00652         SG_ERROR("Not allowed with subset\n");
00653     ASSERT(writer)
00654     sparse_feature_matrix.save(writer);
00655 }
00656 
00657 template<class ST> void CSparseFeatures<ST>::save_with_labels(CLibSVMFile* writer, SGVector<float64_t> labels)
00658 {
00659     if (m_subset_stack->has_subsets())
00660         SG_ERROR("Not allowed with subset\n");
00661     ASSERT(writer)
00662     sparse_feature_matrix.save_with_labels(writer, labels);
00663 }
00664 
00665 template class CSparseFeatures<bool>;
00666 template class CSparseFeatures<char>;
00667 template class CSparseFeatures<int8_t>;
00668 template class CSparseFeatures<uint8_t>;
00669 template class CSparseFeatures<int16_t>;
00670 template class CSparseFeatures<uint16_t>;
00671 template class CSparseFeatures<int32_t>;
00672 template class CSparseFeatures<uint32_t>;
00673 template class CSparseFeatures<int64_t>;
00674 template class CSparseFeatures<uint64_t>;
00675 template class CSparseFeatures<float32_t>;
00676 template class CSparseFeatures<float64_t>;
00677 template class CSparseFeatures<floatmax_t>;
00678 template class CSparseFeatures<complex128_t>;
00679 }
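
A minimal usage sketch for the class implemented above, assuming the standard
Shogun 3.x entry points init_shogun_with_defaults() and exit_shogun() from
<shogun/base/init.h>; the matrix contents are arbitrary illustration values,
and all other calls are methods defined in this file.

#include <shogun/base/init.h>
#include <shogun/io/SGIO.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/features/SparseFeatures.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // dense 3 features x 2 vectors, one column per example
    SGMatrix<float64_t> dense(3, 2);
    dense.zero();
    dense(0, 0)=1.0;
    dense(2, 0)=2.0;
    dense(1, 1)=3.0;

    // the CSparseFeatures(SGMatrix<ST>) constructor converts to sparse storage
    CSparseFeatures<float64_t>* feats=new CSparseFeatures<float64_t>(dense);
    SG_REF(feats);

    // sparse-sparse dot product between examples 0 and 1 (0.0 here, since the
    // two columns share no nonzero index)
    float64_t d=feats->dot(0, feats, 1);

    // sparse-dense dot product of example 0 with a dense query vector
    float64_t q[3]={1.0, 1.0, 1.0};
    float64_t dq=feats->dense_dot(0, q, 3);

    SG_SPRINT("nnz=%lld dot=%f dense_dot=%f\n",
        feats->get_num_nonzero_entries(), d, dq);

    SG_UNREF(feats);
    exit_shogun();
    return 0;
}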