/* NOTE(review): scrape artifact — this is the tail of a PHP thread-model source file fused
   with a web-page title after the closing `?>` tag. The leading `te')); return $arr; }` is
   the end of a function whose beginning is outside this view; it is left untouched.
   Comments below are translated from the original Chinese. */
te')); return $arr; } /* Iterate all threads belonging to a user.
 * @param $uid            user ID
 * @param int    $page     page number
 * @param int    $pagesize records per page
 * @param bool   $desc     sort order: TRUE = descending, FALSE = ascending
 * @param string $key      which column's value to use as the key of the returned array
 * @param array  $col      which columns to select
 */
function thread_tid_find_by_uid($uid, $page = 1, $pagesize = 1000, $desc = TRUE, $key = 'tid', $col = array()) {
	// No user given: nothing to find.
	if (empty($uid)) return array();
	// -1 / 1 select descending / ascending order on `tid` in the query helper.
	$orderby = TRUE == $desc ? -1 : 1;
	$arr = thread_tid__find($cond = array('uid' => $uid), array('tid' => $orderby), $page, $pagesize, $key, $col);
	return $arr;
}
// Iterate tids under a forum section; supports arrays, e.g. $fid = array(1,2,3)
function thread_tid_find_by_fid($fid, $page = 1, $pagesize = 1000, $desc = TRUE) {
	if (empty($fid)) return array();
	// -1 / 1 select descending / ascending order on `tid`.
	$orderby = TRUE == $desc ? -1 : 1;
	$arr = thread_tid__find($cond = array('fid' => $fid), array('tid' => $orderby), $page, $pagesize, 'tid', array('tid', 'verify_date'));
	return $arr;
}
// Delete the thread-index row(s) matching $tid; FALSE when no tid is given.
function thread_tid_delete($tid) {
	if (empty($tid)) return FALSE;
	$r = thread_tid__delete(array('tid' => $tid));
	return $r;
}
// Total number of thread-index rows.
function thread_tid_count() {
	$n = thread_tid__count();
	return $n;
}
// Count a user's threads. (Original note: counts on a non-primary key —
// use with care at large data volumes.)
function thread_uid_count($uid) {
	$n = thread_tid__count(array('uid' => $uid));
	return $n;
}
// Count a forum section's threads. (Original note: counts on a non-primary key —
// use with care at large data volumes.)
function thread_fid_count($fid) {
	$n = thread_tid__count(array('fid' => $fid));
	return $n;
}
?>Can't read parquet file with polars while pyarrow can - Stack Overflow
最新消息:雨落星辰是一个专注网站SEO优化、网站SEO诊断、搜索引擎研究、网络营销推广、网站策划运营及站长类的自媒体原创博客

Can't read parquet file with polars while pyarrow can - Stack Overflow

programmeradmin2浏览0评论

I am getting a dtype exception:

pyo3_runtime.PanicException: Arrow datatype Map(Field { name: "key_value", dtype: LargeList(Field { name: "key_value", dtype: Struct([Field { name: "key", dtype: Utf8View, is_nullable: false, metadata: None }, Field { name: "value", dtype: Int64, is_nullable: true, metadata: None }]), is_nullable: true, metadata: None }), is_nullable: true, metadata: None }, false) not supported by Polars. You probably need to activate that data-type feature.

I can read it if I set use_pyarrow=True, but I need to use lazy mode.

I have a lot of files, so my workaround was to read each file in eager mode with the pyarrow engine and save it again. I tried to use a pyarrow dataset and scan_pyarrow_dataset but had no success. Any help will be appreciated.

发布评论

评论列表(0)

  1. 暂无评论