@@ -6343,7 +6343,7 @@ static struct ggml_tensor * ggml_cpy_impl(
     }
 
     // make a view of the destination
-    struct ggml_tensor * result = b->op == GGML_OP_NONE ? b : ggml_view_tensor(ctx, b);
+    struct ggml_tensor * result = ggml_view_tensor(ctx, b);
     if (strlen(b->name) > 0) {
         ggml_format_name(result, "%s (copy of %s)", b->name, a->name);
     } else {
@@ -6406,6 +6406,54 @@ struct ggml_tensor * ggml_cont_inplace(
     return ggml_cont_impl(ctx, a, true);
 }
 
+
+// make contiguous, with new shape
+GGML_API struct ggml_tensor * ggml_cont_1d(
+        struct ggml_context * ctx,
+        struct ggml_tensor * a,
+        int64_t ne0) {
+    return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
+}
+
+GGML_API struct ggml_tensor * ggml_cont_2d(
+        struct ggml_context * ctx,
+        struct ggml_tensor * a,
+        int64_t ne0,
+        int64_t ne1) {
+    return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
+}
+
+GGML_API struct ggml_tensor * ggml_cont_3d(
+        struct ggml_context * ctx,
+        struct ggml_tensor * a,
+        int64_t ne0,
+        int64_t ne1,
+        int64_t ne2) {
+    return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
+}
+
+struct ggml_tensor * ggml_cont_4d(
+        struct ggml_context * ctx,
+        struct ggml_tensor * a,
+        int64_t ne0,
+        int64_t ne1,
+        int64_t ne2,
+        int64_t ne3) {
+    GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
+
+    bool is_node = false;
+
+    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
+    ggml_format_name(result, "%s (cont)", a->name);
+
+    result->op   = GGML_OP_CONT;
+    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+    result->src[0] = a;
+
+    return result;
+}
+
+
 // ggml_reshape
 
 struct ggml_tensor * ggml_reshape(
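As a usage note, here is a minimal sketch of the new API (an illustration, not part of the commit; the context size and tensor shapes are arbitrary). The ggml_cont_1d/2d/3d/4d helpers produce a contiguous copy with a new shape in a single op, where previously a ggml_cont followed by a ggml_reshape_*d was needed, e.g. to re-pack the non-contiguous view returned by ggml_transpose:

#include "ggml.h"

int main(void) {
    // small scratch context; the size is arbitrary for this sketch
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // 4x3 tensor; ggml_transpose returns a 3x4 *view* that is not contiguous
    struct ggml_tensor * a  = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
    struct ggml_tensor * at = ggml_transpose(ctx, a);

    // one op instead of ggml_reshape_2d(ctx, ggml_cont(ctx, at), 3, 4):
    // a contiguous copy with the new shape (ne0 = 3, ne1 = 4)
    struct ggml_tensor * b = ggml_cont_2d(ctx, at, 3, 4);

    // the element count must match, as enforced by the GGML_ASSERT in ggml_cont_4d
    GGML_ASSERT(ggml_nelements(b) == 3*4);

    // (building the graph and running ggml_graph_compute is omitted here)

    ggml_free(ctx);
    return 0;
}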