
tensorflow - RuntimeError: 'tuple' object has no attribute 'rank'—custom DenseVariational prior


I'm encountering a runtime error when building my Bayesian neural network using tfp.layers.DenseVariational layers on QuantConnect. The error occurs during model construction and states:

Runtime Error: 'tuple' object has no attribute 'rank'
  at assert_input_compatibility
    ndim = x.shape.rank
           ^^^^^^^^^^^^
 in input_spec.py: line 250
  at error_handler
    raise e.with_traceback(filtered_tb) from None
 in traceback_utils.py: line 69
  at build_model
    x = tfpl.DenseVariational(
        ^^^^^^^^^^^^^^^^^^^^^^
 in main.py: line 146
  at __init__
    self.model = self.build_model()
                 ^^^^^^^^^^^^^^^^^^
 in main.py: line 137
  at TrainBayesianProfitabilityModel
    self.bayesianClassifier = BayesianNNProfitabilityClassifier(input_dim=int(X.shape[1]))
                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 in main.py: line 499
  at TrainingPhaseTasks
    self.TrainBayesianProfitabilityModel()
 in main.py: line 312
  at CheckForTrainingRestart
    self.TrainingPhaseTasks()
 in main.py: line 293

This indicates that one of my custom functions—either prior_trainable or posterior_mean_field—is returning a tuple instead of a TensorFlow tensor or a distribution object with proper tensor attributes. My custom functions are intended to create trainable probability distributions for the weights and biases of the variational layer, but it appears that somewhere in the process, a tuple is being passed or returned. This causes the internal input compatibility checks (which try to access .rank) in TensorFlow Probability to fail.

I suspect the issue might be related to how I'm slicing or handling the parameters from the input tensor within these custom functions, leading to a type mismatch. I'm using TensorFlow 2.x and TensorFlow Probability, and the error might be due to either an API mismatch or incorrect implementation of the custom prior/posterior functions.
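As a sanity check on what the message itself means (separate from my model code), the difference seems to be between a plain Python tuple shape and a tf.TensorShape, since only the latter has a .rank attribute:

import numpy as np
import tensorflow as tf

# A NumPy array's .shape is a plain Python tuple and has no .rank attribute,
# whereas a tf.Tensor's .shape is a TensorShape, which does.
print(type(np.zeros((2, 3)).shape))   # <class 'tuple'>
print(tf.zeros((2, 3)).shape.rank)    # 2
# np.zeros((2, 3)).shape.rank         # AttributeError: 'tuple' object has no attribute 'rank'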

I implemented custom prior and posterior functions for tfp.layers.DenseVariational and expected the model to compile and train without issues. However, when building the model, I received a runtime error stating that a 'tuple' object has no attribute 'rank', indicating that my functions are returning a tuple instead of the expected tensor/distribution object.

The custom prior and posterior functions:

# Imports and aliases assumed by the snippets below (inferred from the code):
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfpl = tfp.layers

def posterior_mean_field(kernel_size, bias_size=0, dtype=None):
    n = kernel_size + bias_size
    def fn(t):
        t = tf.convert_to_tensor(t, dtype=dtype)  # Ensure correct dtype
        print("Posterior input shape:", tf.shape(t))
        return tfd.MultivariateNormalDiag(
            loc=t[..., :n],
            scale_diag=1e-5 + tf.nn.softplus(t[..., n:])
        )
    return fn

def prior_trainable(kernel_size, bias_size=0, dtype=None):
    n = kernel_size + bias_size
    def fn(_):
        return tfd.MultivariateNormalDiag(
            loc=tf.Variable(tf.zeros(n), trainable=True, dtype=dtype),
            scale_diag=tf.Variable(tf.ones(n), trainable=True, dtype=dtype)
        )
    return fn
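For reference, this is the factory shape I believe DenseVariational expects, based on the TensorFlow Probability documentation example that these function names come from (paraphrased from memory, so treat it as a sketch rather than the exact published code; the names reference_posterior and reference_prior are mine):

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Sketch of the documented pattern: the layer calls each factory itself with
# (kernel_size, bias_size, dtype), and the factory returns a small Keras model
# that maps the layer's parameter tensor to a distribution.
def reference_posterior(kernel_size, bias_size=0, dtype=None):
    n = kernel_size + bias_size
    c = np.log(np.expm1(1.0))
    return tf.keras.Sequential([
        tfp.layers.VariableLayer(2 * n, dtype=dtype),
        tfp.layers.DistributionLambda(lambda t: tfd.Independent(
            tfd.Normal(loc=t[..., :n],
                       scale=1e-5 + tf.nn.softplus(c + t[..., n:])),
            reinterpreted_batch_ndims=1)),
    ])

def reference_prior(kernel_size, bias_size=0, dtype=None):
    n = kernel_size + bias_size
    return tf.keras.Sequential([
        tfp.layers.VariableLayer(n, dtype=dtype),
        tfp.layers.DistributionLambda(lambda t: tfd.Independent(
            tfd.Normal(loc=t, scale=1.0),
            reinterpreted_batch_ndims=1)),
    ])

# In that example the factories are passed uncalled:
# tfp.layers.DenseVariational(16, make_posterior_fn=reference_posterior,
#                             make_prior_fn=reference_prior, kl_weight=1/1000.0)

My versions above instead pre-call the factory with a single size and return a bare closure over a distribution; I can't tell whether that difference is what ends up handing Keras a tuple where it expects a tensor shape.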

The BayesianNNProfitabilityClassifier's model building code that uses tfpl.DenseVariational:

from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import Adam

class BayesianNNProfitabilityClassifier:
    def __init__(self, input_dim):
        self.input_dim = int(input_dim)
        self.model = self.build_model()
    
    def build_model(self):
        inputs = Input(shape=(self.input_dim,))
        x = tfpl.DenseVariational(
            16,
            make_prior_fn=prior_trainable(16, dtype=tf.float32),
            make_posterior_fn=posterior_mean_field(16, dtype=tf.float32),
            kl_weight=1/1000.0
        )(inputs)
        x = tf.keras.layers.ReLU()(x)

        x = tfpl.DenseVariational(
            12,
            make_prior_fn=prior_trainable(12, dtype=tf.float32),
            make_posterior_fn=posterior_mean_field(12, dtype=tf.float32),
            kl_weight=1/1000.0
        )(x)
        x = tf.keras.layers.ReLU()(x)

        x = tfpl.DenseVariational(
            8,
            make_prior_fn=prior_trainable(8, dtype=tf.float32),
            make_posterior_fn=posterior_mean_field(8, dtype=tf.float32),
            kl_weight=1/1000.0
        )(x)
        x = tf.keras.layers.ReLU()(x)

        outputs = tfpl.DenseVariational(
            2,
            make_prior_fn=prior_trainable(2, dtype=tf.float32),
            make_posterior_fn=posterior_mean_field(2, dtype=tf.float32),
            kl_weight=1/1000.0
        )(x)
        outputs = tf.keras.layers.Softmax()(outputs)
        
        model = tf.keras.Model(inputs, outputs)
        model.compile(optimizer=Adam(learning_rate=0.0008),
                      loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model
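And this is roughly how the classifier is constructed in the algorithm (simplified from main.py line 499 in the stack trace; X is my feature matrix of shape (n_samples, n_features)):

# Simplified from the call site in the stack trace; X is the training feature matrix.
self.bayesianClassifier = BayesianNNProfitabilityClassifier(input_dim=int(X.shape[1]))
# Execution never gets past __init__: build_model() raises
# "'tuple' object has no attribute 'rank'" at the first DenseVariational layer.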